Dataset schema:

| column | type | values |
|---|---|---|
| code | string | lengths 82 to 53.2k |
| code_codestyle | int64 | 0 to 721 |
| style_context | string | lengths 91 to 41.9k |
| style_context_codestyle | int64 | 0 to 699 |
| label | int64 | 0 to 1 |
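Each record below is flattened in column order: the `code` sample, its `code_codestyle` id, the `style_context` sample, its `style_context_codestyle` id, and the binary `label`, with the integer columns rendered as `| n |` separator rows. If this dump comes from a Hugging Face dataset, a minimal sketch of loading it programmatically would look like the following (the dataset identifier is a placeholder, not the real one):

```python
from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical id standing in for this dump.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:200])           # Python source sample (82 to 53.2k chars)
print(row["code_codestyle"])       # style-class id in [0, 721]
print(row["style_context"][:200])  # paired context sample
print(row["label"])                # 0 or 1
```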
import re
import subprocess
import sys

# SHA of the commit at which the current branch forked off `main`.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
# Files modified since the fork point.
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

# The directories to filter on are passed as command-line arguments.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
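For context, a quick self-contained check of what that filter matches (the directory names here are made up for illustration):

```python
import re

joined_dirs = "|".join(["src", "tests"])  # stand-ins for sys.argv[1:]
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

assert regex.match("src/transformers/models/vit/modeling_vit.py")
assert regex.match("tests/models/vit/test_modeling_vit.py")
assert not regex.match("docs/source/index.mdx")  # wrong prefix and extension
```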
| 280 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import AlignProcessor, EfficientNetImageProcessor


@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random uint8 arrays of shape (channels, height, width)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 224 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 292 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Iterate through the array from both ends to find the index of key using recursion.

    >>> search(list(range(0, 11)), 5)
    5
    >>> search([1, 2, 4, 5, 3], 4)
    2
    >>> search([1, 2, 4, 5, 3], 6)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 292 | 1 |
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )


@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random uint8 arrays of shape (channels, height, width)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
| 5 |
import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 512 | 0 |
"""Convert Hubert checkpoint."""

import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 703 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 691 | 0 |
from __future__ import annotations


class Matrix:
    """
    Matrix object generated from a 2D array where each element is an array representing a row.
    """

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 330 |
import argparse
import os

import torch

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)


TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}


def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 330 | 1 |
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    # Fetch the quote of the day from the /today endpoint.
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    # Fetch a random quote from the /random endpoint.
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 705 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 0 |
from __future__ import annotations
import math
class SegmentTree:
    """Segment tree with lazy propagation: range assignment and range-max queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for _ in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for _ in range(0, 4 * size)]
        self.flag = [0 for _ in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in the inclusive 1-based range [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over the inclusive 1-based range [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt) | 25 |
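# A quick brute-force cross-check of the SegmentTree above, run alongside the
# class definition: a range assignment followed by a range-max query should
# match a plain Python list updated the same way.
import random

data = [random.randint(-50, 50) for _ in range(15)]
tree = SegmentTree(len(data))
tree.build(1, 1, len(data), data)

tree.update(1, 1, len(data), 4, 9, 7)  # assign 7 to positions 4..9 (1-based)
for i in range(4, 10):
    data[i - 1] = 7

assert tree.query(1, 1, len(data), 2, 12) == max(data[1:12])
print("lazy segment tree agrees with the brute-force check")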
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def lowercase__ ( A_: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase =botoa.client("""iam""" )
__UpperCAmelCase ={
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=A_ , AssumeRolePolicyDocument=json.dumps(A_ , indent=2 ) )
__UpperCAmelCase ={
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=A_ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(A_ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def lowercase__ ( A_: Dict ) -> Any:
"""simple docstring"""
__UpperCAmelCase =botoa.client("""iam""" )
return iam_client.get_role(RoleName=A_ )["Role"]["Arn"]
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase =_ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , A_ , )
__UpperCAmelCase =None
if credentials_configuration == 0:
__UpperCAmelCase =_ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
__UpperCAmelCase =aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
__UpperCAmelCase =_ask_field("""AWS Access Key ID: """ )
__UpperCAmelCase =aws_access_key_id
__UpperCAmelCase =_ask_field("""AWS Secret Access Key: """ )
__UpperCAmelCase =aws_secret_access_key
__UpperCAmelCase =_ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
__UpperCAmelCase =aws_region
__UpperCAmelCase =_ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , A_ , )
if role_management == 0:
__UpperCAmelCase =_ask_field("""Enter your IAM role name: """ )
else:
__UpperCAmelCase ="""accelerate_sagemaker_execution_role"""
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(A_ )
__UpperCAmelCase =_ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_custom_docker_image:
__UpperCAmelCase =_ask_field("""Enter your Docker image: """ , lambda A_ : str(A_ ).lower() )
__UpperCAmelCase =_ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_sagemaker_inputs_enabled:
__UpperCAmelCase =_ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda A_ : str(A_ ).lower() , )
__UpperCAmelCase =_ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_sagemaker_metrics_enabled:
__UpperCAmelCase =_ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda A_ : str(A_ ).lower() , )
__UpperCAmelCase =_ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
__UpperCAmelCase ={}
__UpperCAmelCase =_ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
__UpperCAmelCase ="""dynamo_"""
__UpperCAmelCase =_ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__UpperCAmelCase =_ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
__UpperCAmelCase =_ask_options(
"""Which mode do you want to use?""" , A_ , lambda A_ : TORCH_DYNAMO_MODES[int(A_ )] , default="""default""" , )
__UpperCAmelCase =_ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =_ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase ="""Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
__UpperCAmelCase =_ask_options(
A_ , A_ , lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__UpperCAmelCase =_ask_field(A_ , lambda A_ : str(A_ ).lower() , default="""ml.p3.2xlarge""" )
__UpperCAmelCase =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__UpperCAmelCase =_ask_field(
"""How many machines do you want use? [1]: """ , A_ , default=1 , )
__UpperCAmelCase =_ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=A_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A_ , use_cpu=A_ , dynamo_config=A_ , eca_instance_type=A_ , profile=A_ , region=A_ , iam_role_name=A_ , mixed_precision=A_ , num_machines=A_ , sagemaker_inputs_file=A_ , sagemaker_metrics_file=A_ , )
| 68 | 0 |
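# Hypothetical sketch of the yes/no converter passed to _ask_field in the
# SageMaker config flow above; accelerate's real _convert_yes_no_to_bool may
# accept a different set of answers, so treat the exact values as assumptions.
def convert_yes_no_to_bool(value: str) -> bool:
    normalized = value.strip().lower()
    if normalized in {"yes", "y"}:
        return True
    if normalized in {"no", "n"}:
        return False
    raise ValueError("Please enter yes or no.")

print(convert_yes_no_to_bool("Yes"), convert_yes_no_to_bool("n"))  # True False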
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name) -> None:
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 348 |
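# A smoke test of write_model_card using a temporary directory so nothing is
# written into the real model_cards/ layout; run it alongside the function
# definition above (importing the module would also trigger the bottom loop).
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    card_dir = Path(tmp) / "allenai" / "wmt16-en-de-12-1"
    write_model_card(card_dir, src_lang="en", tgt_lang="de", model_name="wmt16-en-de-12-1")
    print((card_dir / "README.md").read_text(encoding="utf-8")[:200])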
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def UpperCamelCase ( _A ) -> Any: # picklable for multiprocessing
return x.sum()
def UpperCamelCase ( _A ) -> Tuple: # picklable for multiprocessing
return i + 1
@dataclass
class UpperCamelCase :
_SCREAMING_SNAKE_CASE : int
_SCREAMING_SNAKE_CASE : str
class UpperCamelCase (__snake_case ):
def __snake_case ( self :List[str] ) ->Optional[int]:
lowercase : Optional[Any] = {}
lowercase : List[str] = []
lowercase : List[str] = 1
lowercase : Any = [1, 2]
lowercase : str = {"""a""": 1, """b""": 2}
lowercase : List[str] = {"""a""": [1, 2], """b""": [3, 4]}
lowercase : Union[str, Any] = {"""a""": {"""1""": 1}, """b""": 2}
lowercase : List[str] = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
lowercase : List[Any] = {}
lowercase : Union[str, Any] = []
lowercase : List[str] = 2
lowercase : Tuple = [2, 3]
lowercase : Dict = {"""a""": 2, """b""": 3}
lowercase : Dict = {"""a""": [2, 3], """b""": [4, 5]}
lowercase : Optional[int] = {"""a""": {"""1""": 2}, """b""": 3}
lowercase : str = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
lowercase : int = 2
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
lowercase : Dict = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
lowercase : int = {"""a""": 2, """b""": 0, """c""": 2}
lowercase : str = {
"""a""": np.eye(2 ).astype(__magic_name__ ),
"""b""": np.zeros(3 ).astype(__magic_name__ ),
"""c""": np.ones(2 ).astype(__magic_name__ ),
}
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ ) , __magic_name__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ , num_proc=__magic_name__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(__magic_name__ ): # can't pickle a local lambda
map_nested(lambda __magic_name__ : x + 1 , __magic_name__ , num_proc=__magic_name__ )
def __snake_case ( self :Optional[Any] ) ->Optional[int]:
lowercase : Union[str, Any] = {"""a""": 1, """b""": 2}
lowercase : int = {"""a""": 3, """b""": 4}
lowercase : Optional[Any] = {"""a""": 5, """b""": 6}
lowercase : List[str] = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(__magic_name__ , __magic_name__ , __magic_name__ ) ) , __magic_name__ )
def __snake_case ( self :Tuple ) ->Union[str, Any]:
class UpperCamelCase :
_SCREAMING_SNAKE_CASE : str = """bar"""
lowercase : Tuple = Foo()
self.assertEqual(foo.my_attr , """bar""" )
with temporary_assignment(__magic_name__ , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def UpperCamelCase ( _A , _A , _A ) -> int:
with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
"""datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
lowercase : Union[str, Any] = {F"""{i}""": i for i in range(_A )}
lowercase : List[str] = map_nested(lambda _A : x + 10 , _A , num_proc=_A , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class UpperCamelCase (__snake_case ):
@require_tf
def __snake_case ( self :str ) ->Dict:
import tensorflow as tf
from tensorflow.keras import layers
lowercase : Any = layers.Dense(2 )
def gen_random_output():
lowercase : Tuple = tf.random.uniform((1, 3) )
return model(__magic_name__ ).numpy()
with temp_seed(42 , set_tensorflow=__magic_name__ ):
lowercase : Tuple = gen_random_output()
with temp_seed(42 , set_tensorflow=__magic_name__ ):
lowercase : Optional[int] = gen_random_output()
lowercase : Any = gen_random_output()
np.testing.assert_equal(__magic_name__ , __magic_name__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __snake_case ( self :Union[str, Any] ) ->str:
import torch
def gen_random_output():
lowercase : List[str] = torch.nn.Linear(3 , 2 )
lowercase : List[Any] = torch.rand(1 , 3 )
return model(__magic_name__ ).detach().numpy()
with temp_seed(42 , set_pytorch=__magic_name__ ):
lowercase : Tuple = gen_random_output()
with temp_seed(42 , set_pytorch=__magic_name__ ):
lowercase : int = gen_random_output()
lowercase : List[str] = gen_random_output()
np.testing.assert_equal(__magic_name__ , __magic_name__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __snake_case ( self :Dict ) ->int:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
lowercase : str = gen_random_output()
with temp_seed(42 ):
lowercase : Tuple = gen_random_output()
lowercase : Any = gen_random_output()
np.testing.assert_equal(__magic_name__ , __magic_name__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def UpperCamelCase ( _A ) -> List[Any]:
lowercase : Optional[int] = NestedDataStructure(_A ).data
assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def UpperCamelCase ( _A , _A ) -> Any:
lowercase : int = NestedDataStructure(_A ).flatten()
assert output == expected_output
def UpperCamelCase ( ) -> Tuple:
lowercase : str = A(x=1 , y="""foobar""" )
lowercase : Optional[int] = {"""x""": 1, """y""": """foobar"""}
assert asdict(_A ) == expected_output
lowercase : Dict = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
lowercase : Optional[Any] = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
assert asdict(_A ) == expected_output
with pytest.raises(_A ):
asdict([1, A(x=10 , y="""foo""" )] )
def UpperCamelCase ( _A ) -> int:
return text.split()
def UpperCamelCase ( _A ) -> Any:
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def UpperCamelCase ( ) -> Optional[int]:
with Pool(2 ) as pool:
lowercase : int = list(iflatmap_unordered(_A , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(_A ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
lowercase : Union[str, Any] = list(iflatmap_unordered(_A , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(_A ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
lowercase : int = []
for yield_time, content in iflatmap_unordered(
_A , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(_A )
assert out.count("""a""" ) == 2
assert out.count("""b""" ) == 2
assert len(_A ) == 4
| 348 | 1 |
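# A minimal sketch of the nested-map behavior the tests above exercise;
# datasets' real map_nested also handles numpy arrays and multiprocessing,
# so this is an illustration, not a drop-in replacement.
def map_nested_sketch(fn, data):
    if isinstance(data, dict):
        return {k: map_nested_sketch(fn, v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        return type(data)(map_nested_sketch(fn, v) for v in data)
    return fn(data)

print(map_nested_sketch(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}}))
# {'a': [2, 3], 'b': {'c': 4}}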
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCamelCase = logging.get_logger(__name__)
def normalize_box(box, width: int, height: int) -> list:
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image and returns recognized words plus normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] =["pixel_values"]
def __init__( self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = PILImageResampling.BILINEAR , UpperCAmelCase = True , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = "" , **UpperCAmelCase , ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
__snake_case : Optional[int] = size if size is not None else {"height": 224, "width": 224}
__snake_case : Dict = get_size_dict(UpperCAmelCase )
__snake_case : Any = do_resize
__snake_case : List[Any] = size
__snake_case : Dict = resample
__snake_case : Union[str, Any] = do_rescale
__snake_case : Optional[Any] = rescale_value
__snake_case : Any = do_normalize
__snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__snake_case : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
__snake_case : Optional[int] = apply_ocr
__snake_case : Optional[Any] = ocr_lang
__snake_case : Tuple = tesseract_config
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PILImageResampling.BILINEAR , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
'''simple docstring'''
__snake_case : Optional[int] = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__snake_case : Union[str, Any] = (size["height"], size["width"])
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ) -> PIL.Image.Image:
'''simple docstring'''
__snake_case : Dict = do_resize if do_resize is not None else self.do_resize
__snake_case : Dict = size if size is not None else self.size
__snake_case : Optional[int] = get_size_dict(UpperCAmelCase )
__snake_case : str = resample if resample is not None else self.resample
__snake_case : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
__snake_case : Any = image_mean if image_mean is not None else self.image_mean
__snake_case : Tuple = image_std if image_std is not None else self.image_std
__snake_case : List[str] = apply_ocr if apply_ocr is not None else self.apply_ocr
__snake_case : List[str] = ocr_lang if ocr_lang is not None else self.ocr_lang
__snake_case : int = tesseract_config if tesseract_config is not None else self.tesseract_config
__snake_case : Dict = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("If do_normalize is True, image_mean and image_std must be specified." )
# All transformations expect numpy arrays.
__snake_case : Dict = [to_numpy_array(UpperCAmelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , "pytesseract" )
__snake_case : int = []
__snake_case : Any = []
for image in images:
__snake_case , __snake_case : Dict = apply_tesseract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
words_batch.append(UpperCAmelCase )
boxes_batch.append(UpperCAmelCase )
if do_resize:
__snake_case : Any = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_rescale:
__snake_case : Tuple = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
__snake_case : Any = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
__snake_case : int = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
__snake_case : Optional[Any] = BatchFeature(data={"pixel_values": images} , tensor_type=UpperCAmelCase )
if apply_ocr:
__snake_case : Dict = words_batch
__snake_case : Tuple = boxes_batch
return data
| 243 |
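# Tiny worked example of the box normalization above: pixel boxes are mapped
# onto the 0-1000 grid that LayoutLM-style models expect, independent of the
# page size. Standalone copy so it runs without the image processor class.
def normalize_box_demo(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

print(normalize_box_demo([10, 20, 110, 220], width=200, height=400))
# [50, 50, 550, 550]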
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
| 243 | 1 |
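# Sanity check of solution() against a direct Eratosthenes sieve for a small
# n; run it alongside the functions above. Both should give 7919 for n=1000.
def nth_prime_by_sieve(n: int, bound: int = 120000) -> int:
    sieve = [True] * bound
    sieve[0] = sieve[1] = False
    for i in range(2, int(bound ** 0.5) + 1):
        if sieve[i]:
            sieve[i * i :: i] = [False] * len(sieve[i * i :: i])
    primes = [i for i, is_p in enumerate(sieve) if is_p]
    return primes[n - 1]

assert solution(1000) == nth_prime_by_sieve(1000) == 7919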
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def A_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[int]:
a : Tuple = [line.strip() for line in open(UpperCAmelCase__ , 'r' ).readlines()]
a : Union[str, Any] = []
if args.gold_data_mode == "qa":
a : int = pd.read_csv(UpperCAmelCase__ , sep='\t' , header=UpperCAmelCase__ )
for answer_list in data[1]:
a : Union[str, Any] = ast.literal_eval(UpperCAmelCase__ )
answers.append(UpperCAmelCase__ )
else:
a : Union[str, Any] = [line.strip() for line in open(UpperCAmelCase__ , 'r' ).readlines()]
a : str = [[reference] for reference in references]
a : List[str] = 0
for prediction, ground_truths in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
total += 1
em += metric_max_over_ground_truths(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
fa += metric_max_over_ground_truths(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
a : Any = 1_0_0.0 * em / total
a : Optional[Any] = 1_0_0.0 * fa / total
logger.info(F'F1: {fa:.2f}' )
logger.info(F'EM: {em:.2f}' )
def A_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> int:
a : Any = args.k
a : Tuple = [line.strip() for line in open(UpperCAmelCase__ , 'r' ).readlines()]
a : Any = [line.strip() for line in open(UpperCAmelCase__ , 'r' ).readlines()]
a : List[Any] = 0
for hypo, reference in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
a : Union[str, Any] = set(hypo.split('\t' )[:k] )
a : str = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
a : List[str] = 1_0_0.0 * em / total
logger.info(F'Precision@{k}: {em: .2f}' )
def A_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> int:
def strip_title(UpperCAmelCase__ ):
if title.startswith('"' ):
a : List[str] = title[1:]
if title.endswith('"' ):
a : List[Any] = title[:-1]
return title
a : Union[str, Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCAmelCase__ , return_tensors='pt' , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , )['input_ids'].to(args.device )
a : int = rag_model.rag.question_encoder(UpperCAmelCase__ )
a : List[str] = question_enc_outputs[0]
a : Optional[int] = rag_model.retriever(
UpperCAmelCase__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
a : List[str] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
a : Union[str, Any] = []
for docs in all_docs:
a : Tuple = [strip_title(UpperCAmelCase__ ) for title in docs['title']]
provenance_strings.append('\t'.join(UpperCAmelCase__ ) )
return provenance_strings
def A_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[int]:
with torch.no_grad():
a : Union[str, Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCAmelCase__ , return_tensors='pt' , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ )
a : Tuple = inputs_dict.input_ids.to(args.device )
a : Any = inputs_dict.attention_mask.to(args.device )
a : Dict = rag_model.generate( # rag_model overwrites generate
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=UpperCAmelCase__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
a : int = rag_model.retriever.generator_tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
if args.print_predictions:
for q, a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
logger.info('Q: {} - A: {}'.format(UpperCAmelCase__ , UpperCAmelCase__ ) )
return answers
def A_ ( ) -> str:
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=UpperCAmelCase__ , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=UpperCAmelCase__ , choices=['exact', 'compressed', 'legacy'] , type=UpperCAmelCase__ , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=UpperCAmelCase__ , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=UpperCAmelCase__ , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=UpperCAmelCase__ , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=UpperCAmelCase__ , choices=['qa', 'ans'] , help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) , )
parser.add_argument(
'--predictions_path' , type=UpperCAmelCase__ , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=UpperCAmelCase__ , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=UpperCAmelCase__ , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=UpperCAmelCase__ , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=UpperCAmelCase__ , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , )
a : Any = parser.parse_args()
a : Tuple = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def A_ ( UpperCAmelCase__ ) -> List[Any]:
a : Union[str, Any] = {}
if args.model_type is None:
a : Tuple = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
a : List[Any] = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
a : Any = args.n_docs
if args.index_name is not None:
a : Any = args.index_name
if args.index_path is not None:
a : Optional[int] = args.index_path
else:
a : List[Any] = BartForConditionalGeneration
a : Any = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , UpperCAmelCase__ )
a : Any = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
a : List[str] = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(UpperCAmelCase__ , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(UpperCAmelCase__ ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
a : Tuple = RagRetriever.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
a : Union[str, Any] = model_class.from_pretrained(UpperCAmelCase__ , retriever=UpperCAmelCase__ , **UpperCAmelCase__ )
model.retriever.init_retrieval()
else:
a : Optional[int] = model_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
a : Optional[Any] = []
for line in tqdm(UpperCAmelCase__ ):
questions.append(line.strip() )
if len(UpperCAmelCase__ ) == args.eval_batch_size:
a : Union[str, Any] = evaluate_batch_fn(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
preds_file.write('\n'.join(UpperCAmelCase__ ) + '\n' )
preds_file.flush()
a : List[str] = []
if len(UpperCAmelCase__ ) > 0:
a : str = evaluate_batch_fn(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
preds_file.write('\n'.join(UpperCAmelCase__ ) )
preds_file.flush()
score_fn(UpperCAmelCase__ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = get_args()
main(args)
| 509 |
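# Minimal illustration of the "max over ground truths" scoring the evaluator
# above uses, with a toy exact-match metric standing in for utils_rag's
# implementations (which also normalize punctuation and articles).
def exact_match_demo(prediction: str, ground_truth: str) -> float:
    return float(prediction.strip().lower() == ground_truth.strip().lower())

def max_over_refs(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)

print(max_over_refs(exact_match_demo, "Paris", ["paris", "Paris, France"]))  # 1.0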
"""simple docstring"""
def present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    npv = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(npv, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 509 | 1 |
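# Worked example for present_value above: an initial outlay of -100 followed
# by two +60 inflows, discounted at 10%, gives a small positive NPV:
# -100 + 60/1.1 + 60/1.21 = 4.1322... -> 4.13 after rounding.
print(present_value(0.10, [-100, 60, 60]))  # 4.13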
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : Optional[int] = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0,
        eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50,
        position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 284 |
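# Hedged usage sketch for the config above: instantiate it and read back the
# markup-specific fields set at the bottom of __init__.
config = MarkupLMConfig(max_depth=64)
print(config.model_type, config.max_depth, config.xpath_unit_hidden_size)  # markuplm 64 32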
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa )
lowerCAmelCase , lowerCAmelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa )
lowerCAmelCase = controlnet_params
lowerCAmelCase = 'bird'
lowerCAmelCase = jax.device_count()
lowerCAmelCase = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowerCAmelCase = pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCAmelCase = jax.random.PRNGKey(0 )
lowerCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
lowerCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = shard(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = shard(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = pipe(
prompt_ids=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , prng_seed=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , jit=_SCREAMING_SNAKE_CASE , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
lowerCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa )
lowerCAmelCase , lowerCAmelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa )
lowerCAmelCase = controlnet_params
lowerCAmelCase = 'Chef in the kitchen'
lowerCAmelCase = jax.device_count()
lowerCAmelCase = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowerCAmelCase = pipe.prepare_image_inputs([pose_image] * num_samples )
lowerCAmelCase = jax.random.PRNGKey(0 )
lowerCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
lowerCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = shard(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = shard(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = pipe(
prompt_ids=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , prng_seed=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , jit=_SCREAMING_SNAKE_CASE , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
lowerCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 284 | 1 |
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 42 |
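# NAND is functionally complete, so the other gates fall out of nand_gate;
# a quick derivation of NOT and AND as a follow-up check, run alongside the
# definition above.
def not_gate(a: int) -> int:
    return nand_gate(a, a)

def and_gate(a: int, b: int) -> int:
    return not_gate(nand_gate(a, b))

assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]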
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : LevitConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True ) -> Dict:
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
lowerCamelCase : Optional[Any] = timm.create_model("""levit_128s""" , pretrained=UpperCamelCase__ )
else:
lowerCamelCase : Dict = timm.create_model("""levit_128""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 192:
lowerCamelCase : Tuple = timm.create_model("""levit_192""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 256:
lowerCamelCase : Optional[int] = timm.create_model("""levit_256""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 384:
lowerCamelCase : Dict = timm.create_model("""levit_384""" , pretrained=UpperCamelCase__ )
from_model.eval()
lowerCamelCase : Optional[Any] = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
lowerCamelCase : Tuple = OrderedDict()
lowerCamelCase : Optional[Any] = from_model.state_dict()
lowerCamelCase : str = list(from_model.state_dict().keys() )
lowerCamelCase : List[Any] = list(our_model.state_dict().keys() )
print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : str = weights[og_keys[i]]
our_model.load_state_dict(UpperCamelCase__ )
lowerCamelCase : int = torch.randn((2, 3, 224, 224) )
lowerCamelCase : Any = from_model(UpperCamelCase__ )
lowerCamelCase : List[Any] = our_model(UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one."
lowerCamelCase : Dict = name
print(UpperCamelCase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowerCamelCase : Optional[int] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def snake_case ( UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ) -> Optional[int]:
lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json"""
lowerCamelCase : List[Any] = 1000
lowerCamelCase : Dict = (1, num_labels)
lowerCamelCase : List[Any] = """huggingface/label-files"""
lowerCamelCase : Optional[int] = num_labels
lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase : Any = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase : List[Any] = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Tuple = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
lowerCamelCase : Optional[int] = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
lowerCamelCase : List[Any] = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
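    # Usage sketch (the script filename below is an assumption, not defined here):
    #   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
    #       --pytorch_dump_folder_path levit-dump-folder/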
| 42 | 1 |
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate the image with the affine transform that maps pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, img in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(img, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
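    # Note: cv2.getAffineTransform expects exactly three float32 point pairs; the
    # returned 2x3 matrix is what warpAffine applies. A quick sanity check:
    #   matrix = cv2.getAffineTransform(pts1, pts2)
    #   assert matrix.shape == (2, 3)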
| 87 |
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 72: sum Euler's totient phi(n) for 2 <= n <= limit,
    i.e. the number of reduced proper fractions with denominator <= limit."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
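# Sanity check on a tiny limit: phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so
# solution(8) should equal 1 + 2 + 2 + 4 + 2 + 6 + 4 == 21.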
if __name__ == "__main__":
print(F'''{solution() = }''')
| 300 | 0 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 135: count values of n below the limit for which
    n = x**2 - y**2 - z**2 (with x, y, z a decreasing arithmetic progression)
    has exactly ten solutions."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # a > d keeps z > 0; a < 4d keeps n > 0
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
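# Worked check: for n = 27 both (x, y, z) = (34, 27, 20) and (12, 9, 6) satisfy
# x**2 - y**2 - z**2 == 27, so frequency[27] ends up at exactly 2.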
if __name__ == "__main__":
print(F'''{solution() = }''')
| 708 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
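# is_safetensors_compatible should be True only when every PyTorch ".bin" weight
# in the listing has a ".safetensors" counterpart (optionally carrying a variant
# infix such as ".fp16"); the cases below cover diffusers- and
# transformers-style sub-model layouts.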
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 31 | 0 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    # LayoutLM-style models expect boxes normalized to a 0-1000 coordinate scale.
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : Dict ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[str] = "" ,**lowerCamelCase__ : List[Any] ,):
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = size if size is not None else {'height': 224, 'width': 224}
_UpperCamelCase : Union[str, Any] = get_size_dict(lowerCamelCase__ )
_UpperCamelCase : Optional[int] = do_resize
_UpperCamelCase : str = size
_UpperCamelCase : Any = resample
_UpperCamelCase : List[str] = apply_ocr
_UpperCamelCase : int = ocr_lang
_UpperCamelCase : Union[str, Any] = tesseract_config
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Union[str, Any] ,):
'''simple docstring'''
_UpperCamelCase : Optional[int] = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
_UpperCamelCase : List[str] = (size['height'], size['width'])
return resize(lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST ,**lowerCamelCase__ : List[Any] ,):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : Tuple = size if size is not None else self.size
_UpperCamelCase : Optional[int] = get_size_dict(lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = resample if resample is not None else self.resample
_UpperCamelCase : Union[str, Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
_UpperCamelCase : Union[str, Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
_UpperCamelCase : int = tesseract_config if tesseract_config is not None else self.tesseract_config
_UpperCamelCase : Dict = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
# All transformations expect numpy arrays.
_UpperCamelCase : Union[str, Any] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if apply_ocr:
requires_backends(self ,'pytesseract' )
_UpperCamelCase : Tuple = []
_UpperCamelCase : Tuple = []
for image in images:
_UpperCamelCase , _UpperCamelCase : Optional[Any] = apply_tesseract(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
words_batch.append(lowerCamelCase__ )
boxes_batch.append(lowerCamelCase__ )
if do_resize:
_UpperCamelCase : Any = [self.resize(image=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_UpperCamelCase : Optional[int] = [flip_channel_order(lowerCamelCase__ ) for image in images]
_UpperCamelCase : Dict = [to_channel_dimension_format(lowerCamelCase__ ,lowerCamelCase__ ) for image in images]
_UpperCamelCase : Union[str, Any] = BatchFeature(data={'pixel_values': images} ,tensor_type=lowerCamelCase__ )
if apply_ocr:
_UpperCamelCase : Dict = words_batch
_UpperCamelCase : str = boxes_batch
return data
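# Rough end-to-end sketch (argument names follow the class above; pytesseract
# must be installed for apply_ocr=True):
#   image_processor = LayoutLMv2ImageProcessor(apply_ocr=True)
#   encoding = image_processor(images=page_image, return_tensors="pt")
#   encoding["pixel_values"], encoding["words"], encoding["boxes"]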
| 195 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
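# Each language code above is exposed to the tokenizer as a "__<code>__" special
# token, which is what prefixes source/target sequences with the right language id.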
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : Union[str, Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Dict=None ,lowerCamelCase__ : Optional[int]=None ,lowerCamelCase__ : Optional[int]="<s>" ,lowerCamelCase__ : Optional[int]="</s>" ,lowerCamelCase__ : str="</s>" ,lowerCamelCase__ : Union[str, Any]="<pad>" ,lowerCamelCase__ : Optional[int]="<unk>" ,lowerCamelCase__ : str="m2m100" ,lowerCamelCase__ : Optional[Dict[str, Any]] = None ,lowerCamelCase__ : Optional[Any]=8 ,**lowerCamelCase__ : str ,):
'''simple docstring'''
_UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
_UpperCamelCase : Optional[int] = language_codes
_UpperCamelCase : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES[language_codes]
_UpperCamelCase : str = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
_UpperCamelCase : Optional[Any] = kwargs.get('additional_special_tokens' ,[] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(lowerCamelCase__ )
for lang_code in fairseq_language_code
if self.get_lang_token(lowerCamelCase__ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCamelCase__ ,tgt_lang=lowerCamelCase__ ,bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,language_codes=lowerCamelCase__ ,sp_model_kwargs=self.sp_model_kwargs ,num_madeup_words=lowerCamelCase__ ,**lowerCamelCase__ ,)
_UpperCamelCase : Any = vocab_file
_UpperCamelCase : int = load_json(lowerCamelCase__ )
_UpperCamelCase : int = {v: k for k, v in self.encoder.items()}
_UpperCamelCase : Any = spm_file
_UpperCamelCase : Optional[Any] = load_spm(lowerCamelCase__ ,self.sp_model_kwargs )
_UpperCamelCase : Union[str, Any] = len(self.encoder )
_UpperCamelCase : Dict = {
self.get_lang_token(lowerCamelCase__ ): self.encoder_size + i for i, lang_code in enumerate(lowerCamelCase__ )
}
_UpperCamelCase : Any = {lang_code: self.encoder_size + i for i, lang_code in enumerate(lowerCamelCase__ )}
_UpperCamelCase : Dict = {v: k for k, v in self.lang_token_to_id.items()}
_UpperCamelCase : Any = src_lang if src_lang is not None else 'en'
_UpperCamelCase : int = tgt_lang
_UpperCamelCase : Optional[int] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_UpperCamelCase : List[Any] = num_madeup_words
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase__ ,out_type=lowerCamelCase__ )
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str ):
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(lowerCamelCase__ ,self.encoder[self.unk_token] )
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : int ):
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(lowerCamelCase__ ,self.unk_token )
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = []
_UpperCamelCase : Union[str, Any] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCamelCase__ ) + token
_UpperCamelCase : Optional[int] = []
else:
current_sub_tokens.append(lowerCamelCase__ )
out_string += self.sp_model.decode(lowerCamelCase__ )
return out_string.strip()
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ,lowerCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__ )
_UpperCamelCase : Tuple = [1] * len(self.prefix_tokens )
_UpperCamelCase : str = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCamelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCamelCase__ )) + ([0] * len(lowerCamelCase__ )) + suffix_ones
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : List[Any] = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ):
'''simple docstring'''
_UpperCamelCase : Dict = self.__dict__.copy()
_UpperCamelCase : int = None
return state
def __setstate__( self : Optional[int] ,lowerCamelCase__ : Dict ):
'''simple docstring'''
_UpperCamelCase : Dict = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_UpperCamelCase : int = {}
_UpperCamelCase : Dict = load_spm(self.spm_file ,self.sp_model_kwargs )
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
'''simple docstring'''
_UpperCamelCase : int = Path(lowerCamelCase__ )
if not save_dir.is_dir():
raise OSError(F'{save_directory} should be a directory' )
_UpperCamelCase : List[str] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
_UpperCamelCase : Optional[Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder ,lowerCamelCase__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file ,lowerCamelCase__ )
elif not os.path.isfile(self.spm_file ):
with open(lowerCamelCase__ ,'wb' ) as fi:
_UpperCamelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (str(lowerCamelCase__ ), str(lowerCamelCase__ ))
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : str = "en" ,lowerCamelCase__ : Optional[List[str]] = None ,lowerCamelCase__ : str = "ro" ,**lowerCamelCase__ : List[Any] ,):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = src_lang
_UpperCamelCase : Any = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ )
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[str] ,lowerCamelCase__ : Optional[str] ,**lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_UpperCamelCase : int = src_lang
_UpperCamelCase : Any = self(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,**lowerCamelCase__ )
_UpperCamelCase : List[Any] = self.get_lang_id(lowerCamelCase__ )
_UpperCamelCase : Any = tgt_lang_id
return inputs
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.get_lang_token(lowerCamelCase__ )
_UpperCamelCase : str = self.lang_token_to_id[lang_token]
_UpperCamelCase : Any = [self.cur_lang_id]
_UpperCamelCase : Union[str, Any] = [self.eos_token_id]
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = self.get_lang_token(lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = self.lang_token_to_id[lang_token]
_UpperCamelCase : Union[str, Any] = [self.cur_lang_id]
_UpperCamelCase : List[str] = [self.eos_token_id]
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : str ):
'''simple docstring'''
return self.lang_code_to_token[lang]
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.get_lang_token(lowerCamelCase__ )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
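# Typical round trip (the model id is a published checkpoint; inputs illustrative):
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   model_inputs = tokenizer("Hello world", return_tensors="pt")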
| 195 | 1 |
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build a one-lane highway; -1 marks an empty cell, any other value is a car's speed."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Count the empty cells between a car and the next car ahead (wrapping around)."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """One Nagel-Schreckenberg step: accelerate, brake to the gap, randomly slow down."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Run the update rule repeatedly, moving each car by its new speed."""
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
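# Example run; with probability 0.0 the update rule is fully deterministic:
#   highway = construct_highway(number_of_cells=21, frequency=4, initial_speed=0)
#   simulate(highway, number_of_update=5, probability=0.0, max_speed=5)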
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
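# Each guarded block below swaps the real schedulers for dummy placeholder
# objects when an optional backend (torch, flax, scipy, torchsde) is missing,
# so this module always imports cleanly.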
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 151 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq checkpoint and massage its state dict into OPT's layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the fused weight as K, V, Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Load a fairseq/metaseq OPT checkpoint and export it in Hugging Face format."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
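    # Example invocation (paths are illustrative):
    #   python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
    #       --fairseq_path ./restored.pt --pytorch_dump_folder_path ./opt-hf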
| 11 |
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number,
    reading the title as a base-26 numeral where "A" == 1 and "Z" == 26."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
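# Base-26 walk-through: "AB" -> 1 * 26**1 + 2 * 26**0 == 28.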
if __name__ == "__main__":
from doctest import testmod
testmod()
| 473 | 0 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase = """▁"""
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def lowercase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
super().setUp()
lowerCAmelCase__ = BertGenerationTokenizer(__A , keep_accents=__A )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : List[str] ) -> int:
'''simple docstring'''
lowerCAmelCase__ = """<s>"""
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def lowercase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(__A ) , 1002 )
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = BertGenerationTokenizer(__A , keep_accents=__A )
lowerCAmelCase__ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__A ) , [285, 46, 10, 170, 382] , )
lowerCAmelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = """Hello World!"""
lowerCAmelCase__ = [1_8536, 2260, 101]
self.assertListEqual(__A , self.big_tokenizer.encode(__A ) )
@slow
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCAmelCase__ = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(__A , self.big_tokenizer.encode(__A ) )
@require_torch
@slow
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCAmelCase__ = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase__ = """ """.join(__A )
lowerCAmelCase__ = self.big_tokenizer.encode_plus(__A , return_tensors="""pt""" , return_token_type_ids=__A )
lowerCAmelCase__ = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__A )
lowerCAmelCase__ = BertGenerationConfig()
lowerCAmelCase__ = BertGenerationEncoder(__A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__A )
model(**__A )
@slow
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = {"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 721 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
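# _LazyModule defers the actual submodule imports until first attribute access,
# so importing the package stays cheap even when torch/vision extras are absent.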
| 211 | 0 |
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For each query vector, return its nearest dataset vector and the distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
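# Quick checks: euclidean([0, 0], [3, 4]) == 5.0 and
# cosine_similarity(np.array([1, 0]), np.array([0, 1])) == 0.0.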
| 74 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : List[str] , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : PILImageResampling = PILImageResampling.BICUBIC , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_5_5 , snake_case__ : bool = True , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : bool = True , **snake_case__ : List[Any] , ) -> None:
super().__init__(**snake_case__ )
_lowerCamelCase = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
_lowerCamelCase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = resample
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD
_lowerCamelCase = do_convert_rgb
def _snake_case ( self : Union[str, Any] , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : PILImageResampling = PILImageResampling.BICUBIC , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Dict , ) -> np.ndarray:
_lowerCamelCase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
_lowerCamelCase = (size['height'], size['width'])
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _snake_case ( self : Dict , snake_case__ : np.ndarray , snake_case__ : Union[int, float] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Optional[int] , ) -> int:
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _snake_case ( self : Optional[Any] , snake_case__ : np.ndarray , snake_case__ : Union[float, List[float]] , snake_case__ : Union[float, List[float]] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Optional[int] , ) -> np.ndarray:
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _snake_case ( self : List[str] , snake_case__ : ImageInput , snake_case__ : Optional[bool] = None , snake_case__ : Optional[Dict[str, int]] = None , snake_case__ : PILImageResampling = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[float] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : bool = None , snake_case__ : ChannelDimension = ChannelDimension.FIRST , **snake_case__ : str , ) -> PIL.Image.Image:
_lowerCamelCase = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase = resample if resample is not None else self.resample
_lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase = image_std if image_std is not None else self.image_std
_lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCamelCase = size if size is not None else self.size
_lowerCamelCase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
_lowerCamelCase = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCamelCase = [convert_to_rgb(snake_case__ ) for image in images]
# All transformations expect numpy arrays.
_lowerCamelCase = [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
_lowerCamelCase = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
if do_rescale:
_lowerCamelCase = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_normalize:
_lowerCamelCase = [self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images]
_lowerCamelCase = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
_lowerCamelCase = BatchFeature(data={'pixel_values': images} , tensor_type=snake_case__ )
        return encoded_outputs
| 544 | 0 |
def solution(n: int = 600851475143) -> int:
    """Project Euler 3: return the largest prime factor of n (trial division)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F'{solution() = }')
| 218 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
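# The tests below build a scheduler, run a few denoising steps over
# deterministic dummy tensors from SchedulerCommonTest, and compare the
# resulting sample means against hard-coded references.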
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives the same results
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
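
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test file): the bare denoising
# loop that the assertions above exercise, with a random tensor standing in
# for a real noise-prediction model. Assumes only `torch` and `diffusers`.
# ---------------------------------------------------------------------------
def _demo_single_step_solver():
    import torch
    from diffusers import DPMSolverSinglestepScheduler

    scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)  # 10 inference steps
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample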
| 218 | 1 |
"""Floyd-Warshall all-pairs shortest paths on a dense adjacency matrix."""
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # a node is at distance zero from itself

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        # classic O(n^3) relaxation: allow node k as an intermediate hop
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
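
# Quick self-check (sketch, not in the original module): on a 3-node graph
# with edges 0->1 (w=1), 1->2 (w=2) and a direct 0->2 (w=10), the relaxed
# distance dp[0][2] should come out as 3. The main demo follows below.
def _floyd_warshall_sanity_check():
    g = Graph(3)
    g.add_edge(0, 1, 1)
    g.add_edge(1, 2, 2)
    g.add_edge(0, 2, 10)
    g.floyd_warshall()
    assert g.show_min(0, 2) == 3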
if __name__ == "__main__":
UpperCamelCase__ : Dict = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3) | 578 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
                 num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
                 num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
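
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test file): the same masked-LM check as
# a standalone snippet, using the fill-mask pipeline instead of calling the
# model by hand. Relies on the `uw-madison/nystromformer-512` checkpoint used
# by the slow tests above.
# ---------------------------------------------------------------------------
def _demo_nystromformer_fill_mask():
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="uw-madison/nystromformer-512")
    predictions = unmasker("the [MASK] of Belgium is Brussels")
    return predictions[0]["token_str"]  # expected to be "capital"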
| 673 | 0 |
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1
    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """Evaluate the `order`-th derivative of `func` at `position` via dual numbers.

    >>> differentiate(lambda y: y**2 * y**4, 9, 2)
    196830
    """
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
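
# Added note (not in the original file): since f(y) = y**6, the second
# derivative is 30 * y**4, so the demo above should print 30 * 9**4 = 196830.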
| 713 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403, F401
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 390 | 0 |
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class PolybiusCipher:
    def __init__(self):
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter):
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1, index2):
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message):
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")  # classic 5x5 square merges i and j

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message):
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
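
# Quick round-trip demo (sketch, not in the original module): encoding then
# decoding should reproduce a lowercase, space-free, j-free message.
def _polybius_round_trip_check():
    cipher = PolybiusCipher()
    message = "testmessage"
    assert cipher.decode(cipher.encode(message)) == message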
| 66 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:  # note: also matches "xlarge", which overrides below
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
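
# Example (illustrative note, not in the original script): the first stem
# entry above maps "backbone.downsample_layers.0.0.weight" to
# "backbone.embeddings.patch_embeddings.weight"; rename_key() below moves
# each tensor in the state dict under its new name.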
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
_lowercase : Dict = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
_lowercase : Union[str, Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
_lowercase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
_lowercase : Optional[int] = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
_lowercase : str = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(F"""openmmlab/{model_name}""" )
processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
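
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the conversion script): once a converted
# checkpoint is on the Hub (e.g. "openmmlab/upernet-convnext-tiny"), it can be
# loaded directly for inference; model id and processor mirror the script.
# ---------------------------------------------------------------------------
def _demo_converted_model():
    import requests
    import torch
    from PIL import Image
    from transformers import SegformerImageProcessor, UperNetForSemanticSegmentation

    processor = SegformerImageProcessor()
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        logits = model(pixel_values).logits  # (batch, num_labels, height, width)
    return logits.argmax(dim=1)  # per-pixel ADE20K class ids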
| 66 | 1 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
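
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test file): minimal interactive use of
# the document-question-answering pipeline with the LayoutLM checkpoint the
# slow tests above rely on. Requires Tesseract for OCR, as in those tests.
# ---------------------------------------------------------------------------
def _demo_document_qa():
    from transformers import pipeline

    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    result = dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)
    return result[0]["answer"]  # expected to be "us-001" per the tests above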
| 103 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
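
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test file): typical end-user call of
# the depth-estimation pipeline with the Intel/dpt-large checkpoint used in
# the slow test above. `result["depth"]` is a PIL image; `predicted_depth`
# is the raw tensor.
# ---------------------------------------------------------------------------
def _demo_depth_estimation():
    from transformers import pipeline

    estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    return result["predicted_depth"].shape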
| 103 | 1 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        # the generic save/load check from the common mixin is skipped here;
        # the ets-aware version in check_over_configs covers it
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
| 141 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
@require_torch
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
A_ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''},
] , )
A_ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-05,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''},
] , )
A_ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
] , )
A_ = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
[
{
'''score''': 2.2E-05,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-05,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
# convert model to fp16
pipe.model.half()
A_ = pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result; we just want to make sure
# it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(a__ , a__ )
@slow
@require_torch
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(a__ )
@slow
@require_tf
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(a__ )
def lowerCAmelCase_ ( self , a__ ) -> Tuple:
'''simple docstring'''
A_ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(a__ ) , [
{'''sequence''': '''My name is John''', '''score''': 0.0_08, '''token''': 6_1_0, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.0_07, '''token''': 1_5_7_3, '''token_str''': ''' Chris'''},
] , )
A_ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(a__ ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.2_51,
'''token''': 2_2_0_1,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.2_14,
'''token''': 1_2_7_9_0,
'''token_str''': ''' Lyon''',
},
] , )
A_ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(a__ ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.0_05, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.0_00, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.0_00, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
] , )
@require_torch
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
A_ = None
A_ = None
self.run_pipeline_test(a__ , [] )
@require_tf
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
A_ = None
A_ = None
self.run_pipeline_test(a__ , [] )
def lowerCAmelCase_ ( self , a__ , a__ , a__ ) -> Any:
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = [
F"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def lowerCAmelCase_ ( self , a__ , a__ ) -> Any:
'''simple docstring'''
A_ = fill_masker.tokenizer
A_ = fill_masker.model
A_ = fill_masker(
F"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = fill_masker([F"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = fill_masker([F"This is a {tokenizer.mask_token}", F"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a__ , [
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
] , )
with self.assertRaises(a__ ):
fill_masker([None] )
# An input without the mask token is not supported
with self.assertRaises(a__ ):
fill_masker('''This is''' )
self.run_test_top_k(a__ , a__ )
self.run_test_targets(a__ , a__ )
self.run_test_top_k_targets(a__ , a__ )
self.fill_mask_with_duplicate_targets_and_top_k(a__ , a__ )
self.fill_mask_with_multiple_masks(a__ , a__ )
def lowerCAmelCase_ ( self , a__ , a__ ) -> List[Any]:
'''simple docstring'''
A_ = tokenizer.get_vocab()
A_ = sorted(vocab.keys() )[:2]
# Pipeline argument
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ , targets=a__ )
A_ = fill_masker(F"This is a {tokenizer.mask_token}" )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , a__ )
A_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(a__ ) )
# Call argument
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=a__ )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , a__ )
A_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(a__ ) )
# Score equivalence
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=a__ )
A_ = [top_mask['''token_str'''] for top_mask in outputs]
A_ = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a__ ) == set(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=a__ )
A_ = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a__ ) , nested_simplify(a__ ) )
# Raises with invalid
with self.assertRaises(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=[''''''] )
with self.assertRaises(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets='''''' )
def lowerCAmelCase_ ( self , a__ , a__ ) -> int:
'''simple docstring'''
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ , top_k=2 )
A_ = fill_masker(F"This is a {tokenizer.mask_token}" )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
self.assertEqual(nested_simplify(a__ ) , nested_simplify(a__ ) )
def lowerCAmelCase_ ( self , a__ , a__ ) -> str:
'''simple docstring'''
A_ = tokenizer.get_vocab()
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
# top_k=2, ntargets=3
A_ = sorted(vocab.keys() )[:3]
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , top_k=2 , targets=a__ )
# If we use the most probable targets, and filter differently, we should still
# have the same results
A_ = [el['''token_str'''] for el in sorted(a__ , key=lambda x : x["score"] , reverse=a__ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a__ ).issubset(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , top_k=3 , targets=a__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a__ ) , nested_simplify(a__ ) )
def lowerCAmelCase_ ( self , a__ , a__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = tokenizer.get_vocab()
# String duplicates + id duplicates
A_ = sorted(vocab.keys() )[:3]
A_ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
A_ = fill_masker(F"My name is {tokenizer.mask_token}" , targets=a__ , top_k=1_0 )
# The target list contains duplicates, so the pipeline cannot return
# more candidates than the number of unique targets
self.assertEqual(len(a__ ) , 3 )
def lowerCAmelCase_ ( self , a__ , a__ ) -> List[Any]:
'''simple docstring'''
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = fill_masker(
F"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a__ , [
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
] , )
import argparse
from collections import defaultdict
def lowercase ( _a ,_a ,_a ,_a ,_a ) -> Union[str, Any]:
UpperCAmelCase_: Any = f"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(_a ,"r" ) as f:
UpperCAmelCase_: Union[str, Any] = f.readlines()
UpperCAmelCase_: int = f"class {class_name}("
UpperCAmelCase_: Union[str, Any] = f"{4 * ' '}def {test_name}("
UpperCAmelCase_: Dict = f"{8 * ' '}{correct_line.split()[0]}"
UpperCAmelCase_: List[str] = f"{16 * ' '}{correct_line.split()[0]}"
UpperCAmelCase_: Any = False
UpperCAmelCase_: Optional[Any] = False
UpperCAmelCase_: Tuple = False
UpperCAmelCase_: Union[str, Any] = False
UpperCAmelCase_: int = 0
UpperCAmelCase_: Tuple = 0
UpperCAmelCase_: Union[str, Any] = []
for line in lines:
if line.startswith(_a ):
UpperCAmelCase_: Tuple = True
elif in_class and line.startswith(_a ):
UpperCAmelCase_: Optional[Any] = True
elif in_class and in_func and (line.startswith(_a ) or line.startswith(_a )):
UpperCAmelCase_: Dict = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase_: List[str] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase_: Tuple = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"{spaces * ' '}{correct_line}" )
UpperCAmelCase_: Dict = False
else:
new_lines.append(_a )
with open(_a ,"w" ) as f:
for line in new_lines:
f.write(_a )
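# Expected line format in --correct_filename, inferred from the split(";") in
# main() below (values here are illustrative, not from the original script):
#     tests/test_foo.py;FooTester;test_bar;self.assertEqual(x , y )
# i.e. "file;class_name;test_name;correct_line"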
def lowercase ( _a ,_a=None ) -> List[Any]:
if fail is not None:
with open(_a ,"r" ) as f:
UpperCAmelCase_: List[Any] = {l.strip() for l in f.readlines()}
else:
UpperCAmelCase_: Tuple = None
with open(_a ,"r" ) as f:
UpperCAmelCase_: Any = f.readlines()
UpperCAmelCase_: Any = defaultdict(_a )
for line in correct_lines:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: List[Any] = line.split(";" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_a ,_a ,_a ,_a ,_a )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
_lowerCAmelCase = parser.parse_args()
main(args.correct_filename, args.fail_filename)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
def snake_case_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: str = 1
UpperCAmelCase_: Optional[Any] = 3
UpperCAmelCase_: Optional[int] = (32, 32)
UpperCAmelCase_: int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A__ )
return image
@property
def snake_case_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_: str = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=A__ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def snake_case_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_: int = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def snake_case_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
return CLIPTextModel(A__ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Any = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: Optional[Any] = self.dummy_cond_unet_upscale
UpperCAmelCase_: Dict = DDPMScheduler()
UpperCAmelCase_: Optional[int] = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_: Union[str, Any] = self.dummy_vae
UpperCAmelCase_: Optional[int] = self.dummy_text_encoder
UpperCAmelCase_: str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_: Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_: Dict = Image.fromarray(np.uinta(A__ ) ).convert("RGB" ).resize((64, 64) )
# make sure that the PNDM scheduler skips the PRK steps
UpperCAmelCase_: Optional[int] = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
UpperCAmelCase_: Optional[int] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Union[str, Any] = "A painting of a squirrel eating a burger"
UpperCAmelCase_: Any = torch.Generator(device=A__ ).manual_seed(0 )
UpperCAmelCase_: int = sd_pipe(
[prompt] , image=A__ , generator=A__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_: Optional[Any] = output.images
UpperCAmelCase_: List[str] = torch.Generator(device=A__ ).manual_seed(0 )
UpperCAmelCase_: List[Any] = sd_pipe(
[prompt] , image=A__ , generator=A__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=A__ , )[0]
UpperCAmelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_: List[str] = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase_: int = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase_: List[str] = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: Optional[Any] = self.dummy_cond_unet_upscale
UpperCAmelCase_: Union[str, Any] = DDPMScheduler()
UpperCAmelCase_: Optional[Any] = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_: Dict = self.dummy_vae
UpperCAmelCase_: Any = self.dummy_text_encoder
UpperCAmelCase_: Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_: Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_: List[str] = Image.fromarray(np.uinta(A__ ) ).convert("RGB" ).resize((64, 64) )
# make sure that the PNDM scheduler skips the PRK steps
UpperCAmelCase_: str = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
UpperCAmelCase_: int = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Any = "A painting of a squirrel eating a burger"
UpperCAmelCase_: Union[str, Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_: Any = output.images
assert image.shape[0] == 2
UpperCAmelCase_: Any = torch.Generator(device=A__ ).manual_seed(0 )
UpperCAmelCase_: Any = sd_pipe(
[prompt] , image=A__ , generator=A__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_: Dict = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: List[str] = self.dummy_cond_unet_upscale
UpperCAmelCase_: Dict = DDPMScheduler()
UpperCAmelCase_: int = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_: Dict = self.dummy_vae
UpperCAmelCase_: Dict = self.dummy_text_encoder
UpperCAmelCase_: Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_: List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_: Union[str, Any] = Image.fromarray(np.uinta(A__ ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase_: List[str] = unet.half()
UpperCAmelCase_: Union[str, Any] = text_encoder.half()
# make sure that the PNDM scheduler skips the PRK steps
UpperCAmelCase_: Optional[Any] = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
UpperCAmelCase_: Optional[int] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Any = "A painting of a squirrel eating a burger"
UpperCAmelCase_: List[Any] = torch.manual_seed(0 )
UpperCAmelCase_: str = sd_pipe(
[prompt] , image=A__ , generator=A__ , num_inference_steps=2 , output_type="np" , ).images
UpperCAmelCase_: str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def snake_case_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_: List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
UpperCAmelCase_: Optional[Any] = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_: Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
UpperCAmelCase_: List[str] = "a cat sitting on a park bench"
UpperCAmelCase_: Any = torch.manual_seed(0 )
UpperCAmelCase_: Any = pipe(
prompt=A__ , image=A__ , generator=A__ , output_type="np" , )
UpperCAmelCase_: Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_: Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
UpperCAmelCase_: Optional[int] = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_: Any = StableDiffusionUpscalePipeline.from_pretrained(
A__ , torch_dtype=torch.floataa , )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
UpperCAmelCase_: Any = "a cat sitting on a park bench"
UpperCAmelCase_: Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_: Optional[Any] = pipe(
prompt=A__ , image=A__ , generator=A__ , output_type="np" , )
UpperCAmelCase_: str = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def snake_case_ ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_: List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_: Tuple = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_: Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(
A__ , torch_dtype=torch.floataa , )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_: str = "a cat sitting on a park bench"
UpperCAmelCase_: Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_: Union[str, Any] = pipe(
prompt=A__ , image=A__ , generator=A__ , num_inference_steps=5 , output_type="np" , )
UpperCAmelCase_: Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = """▁"""
lowerCamelCase__ : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCamelCase__ : Union[str, Any] = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
lowerCamelCase__ : List[Any] = {
"""facebook/nllb-200-distilled-600M""": 1_0_2_4,
}
# fmt: off
lowerCamelCase__ : List[str] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = VOCAB_FILES_NAMES
__lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Optional[Any] = ['input_ids', 'attention_mask']
__lowercase : List[int] = []
__lowercase : List[int] = []
def __init__( self:List[Any] , _a:int , _a:Optional[int]="<s>" , _a:Any="</s>" , _a:int="</s>" , _a:str="<s>" , _a:Tuple="<unk>" , _a:Any="<pad>" , _a:str="<mask>" , _a:str=None , _a:Union[str, Any]=None , _a:List[Any]=None , _a:Optional[Dict[str, Any]] = None , _a:Any=None , _a:str=False , **_a:Tuple , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case__ = legacy_behaviour
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , tokenizer_file=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_a , **_a , )
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
snake_case__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case__ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case__ = 1
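# Worked example of the offset (illustrative, following the alignment table
# above): spm gives 'an' id 3; adding fairseq_offset (1) yields fairseq id 4,
# which matches the fairseq row.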
snake_case__ = len(self.sp_model )
snake_case__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_a )
}
snake_case__ = {v: k for k, v in self.lang_code_to_id.items()}
snake_case__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
snake_case__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
snake_case__ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
snake_case__ = src_lang if src_lang is not None else '''eng_Latn'''
snake_case__ = self.lang_code_to_id[self._src_lang]
snake_case__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self:Optional[Any] ):
snake_case__ = self.__dict__.copy()
snake_case__ = None
snake_case__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self:int , _a:List[Any] ):
snake_case__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ = {}
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def SCREAMING_SNAKE_CASE__ ( self:str ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:str ):
snake_case__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:List[int] , _a:Optional[List[int]] = None , _a:bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
snake_case__ = [1] * len(self.prefix_tokens )
snake_case__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_a )) + suffix_ones
return prefix_ones + ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:List[int] , _a:Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
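# Illustrative single-sequence layout, given the prefix/suffix tokens set in
# set_src_lang_special_tokens below:
#   non-legacy mode: [src_lang_code] + token_ids + [eos]
#   legacy mode:     token_ids + [eos, src_lang_code]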
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:List[int] , _a:Optional[List[int]] = None ):
snake_case__ = [self.sep_token_id]
snake_case__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:str , _a:Optional[str] , _a:Optional[str] , **_a:List[str] ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
snake_case__ = src_lang
snake_case__ = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
snake_case__ = self.convert_tokens_to_ids(_a )
snake_case__ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:str ):
return self.sp_model.encode(_a , out_type=_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:List[str] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case__ = self.sp_model.PieceToId(_a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:str ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:Any ):
snake_case__ = ''''''.join(_a ).replace(_a , ''' ''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:str , _a:Optional[str] = None ):
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
snake_case__ = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE__ ( self:str , _a:List[str] , _a:str = "eng_Latn" , _a:Optional[List[str]] = None , _a:str = "fra_Latn" , **_a:Union[str, Any] , ):
snake_case__ = src_lang
snake_case__ = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE__ ( self:int ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int ):
snake_case__ = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
snake_case__ = []
snake_case__ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case__ = [self.cur_lang_code]
snake_case__ = [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:str ):
snake_case__ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
snake_case__ = []
snake_case__ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case__ = [self.cur_lang_code]
snake_case__ = [self.eos_token_id]
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
SCREAMING_SNAKE_CASE__ : str ={value: key for key, value in encode_dict.items()}
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->str:
_lowerCamelCase : Dict = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->str:
if set(SCREAMING_SNAKE_CASE_ ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
_lowerCamelCase : List[Any] = ''''''
for word in coded.split():
while len(SCREAMING_SNAKE_CASE_ ) != 0:
decoded += decode_dict[word[:5]]
_lowerCamelCase : Union[str, Any] = word[5:]
decoded += " "
return decoded.strip()
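# Round-trip sketch using the dictionaries above (illustrative):
#     encode("hello") -> "AABBBAABAAABABAABABAABBAB"
#     decode(encode("hello")) -> "hello"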
if __name__ == "__main__":
from doctest import testmod
testmod()
from typing import Any
class _UpperCamelCase :
def __init__( self: Any , _SCREAMING_SNAKE_CASE: Tuple ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = data
UpperCamelCase_ = None
def __repr__( self: Optional[Any] ) -> str:
"""simple docstring"""
return f'''Node({self.data})'''
class _UpperCamelCase :
def __init__( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = None
def __iter__( self: str ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.head
while node:
yield node.data
UpperCamelCase_ = node.next
def __len__( self: Tuple ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self: List[str] ) -> str:
"""simple docstring"""
return "->".join([str(_SCREAMING_SNAKE_CASE ) for item in self] )
def __getitem__( self: Dict , _SCREAMING_SNAKE_CASE: Dict ) -> Any:
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self: Any , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Tuple ) -> None:
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
UpperCamelCase_ = self.head
for _ in range(_SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = current.next
UpperCamelCase_ = data
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Dict ) -> None:
"""simple docstring"""
self.insert_nth(len(self ) , _SCREAMING_SNAKE_CASE )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: List[str] ) -> None:
"""simple docstring"""
self.insert_nth(0 , _SCREAMING_SNAKE_CASE )
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] ) -> None:
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
UpperCamelCase_ = Node(_SCREAMING_SNAKE_CASE )
if self.head is None:
UpperCamelCase_ = new_node
elif index == 0:
UpperCamelCase_ = self.head # link new_node to head
UpperCamelCase_ = new_node
else:
UpperCamelCase_ = self.head
for _ in range(index - 1 ):
UpperCamelCase_ = temp.next
UpperCamelCase_ = temp.next
UpperCamelCase_ = new_node
def lowercase ( self: List[Any] ) -> None: # print every node data
"""simple docstring"""
print(self )
def lowercase ( self: List[Any] ) -> Any:
"""simple docstring"""
return self.delete_nth(0 )
def lowercase ( self: str ) -> Any: # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Any = 0 ) -> Any:
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
UpperCamelCase_ = self.head # default first node
if index == 0:
UpperCamelCase_ = self.head.next
else:
UpperCamelCase_ = self.head
for _ in range(index - 1 ):
UpperCamelCase_ = temp.next
UpperCamelCase_ = temp.next
UpperCamelCase_ = temp.next.next
return delete_node.data
def lowercase ( self: Union[str, Any] ) -> bool:
"""simple docstring"""
return self.head is None
def lowercase ( self: int ) -> None:
"""simple docstring"""
UpperCamelCase_ = None
UpperCamelCase_ = self.head
while current:
# Store the current node's next node.
UpperCamelCase_ = current.next
# Make the current node's next point backwards
UpperCamelCase_ = prev
# Make the previous node be the current node
UpperCamelCase_ = current
# Make the current node the next node (to progress iteration)
UpperCamelCase_ = next_node
# Return prev in order to put the head at the end
UpperCamelCase_ = prev
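# reverse() usage sketch (illustrative; the tests below exercise the same path):
#     lst = LinkedList()
#     for v in (1, 2, 3):
#         lst.insert_tail(v)
#     lst.reverse()  # str(lst) == "3->2->1"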
def lowerCAmelCase_ ( ) -> None:
UpperCamelCase_ = LinkedList()
assert linked_list.is_empty() is True
assert str(__snake_case ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__snake_case ) == i
linked_list.insert_nth(__snake_case , i + 1 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__snake_case ) == 9
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
UpperCamelCase_ = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(-8 , 1 ) )
def lowerCAmelCase_ ( ) -> None:
UpperCamelCase_ = [
-9,
100,
Node(77345112 ),
"dlrow olleH",
7,
5555,
0,
-192.55555,
"Hello, world!",
77.9,
Node(10 ),
None,
None,
12.20,
]
UpperCamelCase_ = LinkedList()
for i in test_input:
linked_list.insert_tail(__snake_case )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
UpperCamelCase_ = linked_list.delete_head()
assert result == -9
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
UpperCamelCase_ = linked_list.delete_tail()
assert result == 12.2
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
UpperCamelCase_ = linked_list.delete_nth(10 )
assert result is None
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__snake_case )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__snake_case )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowerCAmelCase_ ( ) -> List[str]:
from doctest import testmod
testmod()
UpperCamelCase_ = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(__snake_case )
print("\nReading/changing Node data using indexing:" )
print(F'''Element at Position 1: {linked_list[1]}''' )
UpperCamelCase_ = input("Enter New Value: " ).strip()
print("New list:" )
print(__snake_case )
print(F'''length of linked_list is : {len(__snake_case )}''' )
if __name__ == "__main__":
main()
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=_SCREAMING_SNAKE_CASE , )
assert hasattr(self , "env" )
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Dict ) -> str:
"""simple docstring"""
UpperCamelCase_ = f'''{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'''
# distributed data settings
UpperCamelCase_ = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
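# (editorial) "smdistributed.dataparallel" selects SageMaker's data-parallel
# launcher; run_ddp.py is excluded because it drives torch.distributed itself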
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_SCREAMING_SNAKE_CASE , instance_count=_SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=_SCREAMING_SNAKE_CASE , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_SCREAMING_SNAKE_CASE , py_version="py36" , )
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Any ) -> List[Any]:
"""simple docstring"""
TrainingJobAnalytics(_SCREAMING_SNAKE_CASE ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.create_estimator(_SCREAMING_SNAKE_CASE )
# run training
estimator.fit()
# result dataframe
UpperCamelCase_ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCamelCase_ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
UpperCamelCase_ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCamelCase_ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , _SCREAMING_SNAKE_CASE )
'''simple docstring'''
from maths.prime_factors import prime_factors
def UpperCamelCase__ ( _lowercase : int ) -> int:
if not isinstance(_lowercase , _lowercase ):
__UpperCAmelCase: List[str] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(_lowercase )
if number < 1:
raise ValueError("""Input must be a positive integer""" )
return -1 if len(prime_factors(_lowercase ) ) % 2 else 1
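# Editorial note (hedged): the classical Möbius function also returns 0 for
# inputs that are not square-free. A minimal sketch of that extra guard,
# assuming prime_factors returns the prime factors with multiplicity:
#     factors = prime_factors(number)
#     if len(set(factors)) != len(factors):
#         return 0  # a squared prime divides number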
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def UpperCamelCase__ ( _lowercase : Dict ) -> str:
for param in module.parameters():
__UpperCAmelCase: int = False
def UpperCamelCase__ ( ) -> List[Any]:
__UpperCAmelCase: int = """cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
__UpperCAmelCase: Dict = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
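# Usage sketch (illustrative): the selector above returns "cuda", "mps", or
# "cpu"; pass its result to tensor.to(...) / module.to(...) as usual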
def UpperCamelCase__ ( _lowercase : List[str] ) -> List[str]:
__UpperCAmelCase: List[str] = plt.imshow(_lowercase )
fig.axes.get_xaxis().set_visible(_lowercase )
fig.axes.get_yaxis().set_visible(_lowercase )
plt.show()
def UpperCamelCase__ ( ) -> List[Any]:
__UpperCAmelCase: Any = datetime.now()
__UpperCAmelCase: Any = current_time.strftime("""%H:%M:%S""" )
return timestamp
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Optional[int]:
_lowerCamelCase : List[Any] = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
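# (editorial) intended behavior of the helper above: True iff every tensor in
# the input list shares a single shape, e.g. two (2, 3) tensors -> True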
class _UpperCAmelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
__snake_case = StableDiffusionLatentUpscalePipeline
__snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""height""",
"""width""",
"""cross_attention_kwargs""",
"""negative_prompt_embeds""",
"""prompt_embeds""",
}
__snake_case = PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
__snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__snake_case = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__snake_case = frozenset([] )
__snake_case = True
@property
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : Dict = 1
_lowerCamelCase : Optional[int] = 4
_lowerCamelCase : Optional[int] = (16, 16)
_lowerCamelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowercase )
return image
def a__ ( self ) -> Any:
torch.manual_seed(0 )
_lowerCamelCase : Any = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_lowercase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_lowercase , only_cross_attention=_lowercase , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
_lowerCamelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0,
            generator=generator, output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image_upscaling(self):
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )
        upscaled_image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20, guidance_scale=0,
            generator=generator, output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
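# Example of the mapping performed by `rename_key` (derived from the rules above):
#   "module.v.blocks.0.attn.proj.weight"
#     -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"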
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # the fused qkv matrix is split into separate query/key/value tensors;
            # the target key layout below is an assumption based on the standard HF AST naming
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
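    # Example invocation (the script filename and paths are illustrative):
    #   python convert_audio_spectrogram_transformer_checkpoint.py \
    #       --model_name ast-finetuned-audioset-10-10-0.4593 \
    #       --pytorch_dump_folder_path ./ast-converted --push_to_hub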
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
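# The module above uses HF's lazy-import pattern: heavy submodules are only
# imported when one of their exported names is first accessed. A minimal
# stand-alone sketch of the same idea (a simplification, not the actual
# _LazyModule implementation):
#
#     import importlib
#     import types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure  # {submodule: [names]}
#
#         def __getattr__(self, item):
#             for submodule, names in self._import_structure.items():
#                 if item in names:
#                     module = importlib.import_module(f".{submodule}", self.__name__)
#                     return getattr(module, item)
#             raise AttributeError(f"module {self.__name__} has no attribute {item}")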
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool built on an NLI checkpoint."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
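# Hedged usage sketch (the one-call interface comes from the PipelineTool base
# class; the checkpoint is downloaded on first use):
#
#     classifier = TextClassificationTool()
#     label = classifier("This movie was great!", labels=["positive", "negative"])
#     # -> "positive"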
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = DebertaTokenizer
__UpperCAmelCase = True
__UpperCAmelCase = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
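    # Note on the fixtures above: "\u0120" renders as "Ġ" and is the byte-level
    # BPE marker that GPT-2-style tokenizers use for a leading space, so
    # "\u0120low" is the token for " low".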
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")
            sequences = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'input_ids': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(decoded, expected)
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[param_name] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
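    # Example invocation (the script filename is illustrative):
    #   python extract.py --model_type roberta --model_name roberta-large \
    #       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform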
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
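# Why the equality above holds: DDPMScheduler.add_noise and
# DDIMScheduler.add_noise implement the same forward-noising rule,
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# so with identical beta schedules, seeds, and timesteps the noised inputs,
# and therefore the training steps, coincide.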
"""simple docstring"""
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
@property
    def metric_definitions(self):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self):
return F"{self.framework}-transfromers-test"
@property
    def test_path(self):
return F"./tests/sagemaker/scripts/{self.framework}"
@property
    def image_uri(self):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
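# Example (hypothetical component values): a 10 mH inductor with a 5 uF
# capacitor resonates at f = 1 / (2 * pi * sqrt(L * C)) ~= 711.76 Hz:
#   resonant_frequency(inductance=0.01, capacitance=5e-6)
#   # -> ('Resonant frequency', 711.76...)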
if __name__ == "__main__":
import doctest
doctest.testmod()
import math
def proth(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
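# The first Proth numbers (k * 2^n + 1 with odd k < 2^n) returned above:
#   proth(1) -> 3, proth(2) -> 5, proth(3) -> 9, proth(4) -> 13,
#   proth(5) -> 17, proth(6) -> 25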
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
    value = 0
    try:
        value = proth(number)
except ValueError:
print(F'ValueError: there is no {number}th Proth number')
continue
print(F'The {number}th Proth number: {value}')
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
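    # Example invocation (the script filename is illustrative):
    #   python convert_sam_checkpoint.py --model_name sam_vit_b_01ec64 \
    #       --pytorch_dump_folder_path ./sam-vit-base --push_to_hub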
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0,
        qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
        hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02,
        layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10,
        encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
def lowerCAmelCase_ ( self : int ) -> Union[str, Any]:
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : List[Any] ) -> Any:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
__a = FocalNetModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__a = model(lowerCamelCase_ )
__a = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__a = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone(self, config, pixel_values, labels):
__a = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__a = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__a = None
__a = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__a = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
__a = FocalNetForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__a = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a = 1
__a = FocalNetForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
__a = self.type_sequence_label_size
__a = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__a = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self : Tuple ) -> Dict:
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( A_ , A_ , unittest.TestCase ):
A_ : List[str] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
A_ : List[Any] = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
A_ : int = False
A_ : str = False
A_ : Any = False
A_ : Optional[Any] = False
A_ : Union[str, Any] = False
def lowerCAmelCase_ ( self : Any ) -> Optional[Any]:
__a = FocalNetModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase_ , embed_dim=37 , has_text_modality=lowerCamelCase_ )
def lowerCAmelCase_ ( self : Any ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
return
def lowerCAmelCase_ ( self : List[Any] ) -> Optional[int]:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCAmelCase_ ( self : str ) -> List[str]:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase_ )
def lowerCAmelCase_ ( self : int ) -> Optional[int]:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def lowerCAmelCase_ ( self : str ) -> Dict:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : List[Any] ) -> Dict:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def lowerCAmelCase_ ( self : List[str] ) -> Optional[Any]:
pass
def lowerCAmelCase_ ( self : Optional[Any] ) -> Any:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__a = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def lowerCAmelCase_ ( self : int ) -> Optional[int]:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__a = model_class(lowerCamelCase_ )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
__a = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
__a = outputs.hidden_states
__a = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# FocalNet has a different seq_length
__a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__a = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
__a , __a , __a , __a = reshaped_hidden_states[0].shape
__a = (
reshaped_hidden_states[0].view(lowerCamelCase_ , lowerCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase_ ( self : str ) -> Optional[int]:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__a = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCAmelCase_ ( self : Any ) -> List[str]:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__a = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__a = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__a = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
@slow
def lowerCAmelCase_ ( self : Tuple ) -> Tuple:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = FocalNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCAmelCase_ ( self : int ) -> Union[str, Any]:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
__a = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class a ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self : Tuple ) -> int:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def lowerCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
__a = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ )
__a = self.default_image_processor
__a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__a = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
__a = model(**lowerCamelCase_ )
# verify the logits
__a = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
__a = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class a ( A_ , unittest.TestCase ):
A_ : Dict = (FocalNetBackbone,) if is_torch_available() else ()
A_ : Dict = FocalNetConfig
A_ : Any = False
def lowerCAmelCase_ ( self : Dict ) -> int:
__a = FocalNetModelTester(self )
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
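# For intuition, a minimal standalone sketch (not part of the metric class above)
# of how the binary MCC is computed directly from confusion-matrix counts;
# `_binary_mcc` is a hypothetical helper added here for illustration only.
def _binary_mcc(tp: int, tn: int, fp: int, fn: int) -> float:
    # MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN))
    numerator = tp * tn - fp * fn
    denominator = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5
    # By convention the coefficient is 0 when any marginal count is zero.
    return 0.0 if denominator == 0 else numerator / denominator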
| 173 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput( BaseOutput ):
"""simple docstring"""
a : jnp.ndarray
a : jnp.ndarray
class FlaxControlNetConditioningEmbedding( nn.Module ):
"""simple docstring"""
a : int
a : Tuple[int] =(16, 32, 96, 2_56)
a : jnp.dtype =jnp.floataa
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase : List[Any] = []
for i in range(len(self.block_out_channels ) - 1 ):
lowerCAmelCase : List[Any] = self.block_out_channels[i]
lowerCAmelCase : Optional[int] = self.block_out_channels[i + 1]
lowerCAmelCase : Tuple = nn.Conv(
snake_case__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case__ )
lowerCAmelCase : List[str] = nn.Conv(
snake_case__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case__ )
lowerCAmelCase : Tuple = blocks
lowerCAmelCase : List[str] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = self.conv_in(snake_case__ )
lowerCAmelCase : Dict = nn.silu(snake_case__ )
for block in self.blocks:
lowerCAmelCase : Any = block(snake_case__ )
lowerCAmelCase : Optional[Any] = nn.silu(snake_case__ )
lowerCAmelCase : Union[str, Any] = self.conv_out(snake_case__ )
return embedding
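# (Illustrative note, not in the original module.) Every channel transition in
# the embedder above adds one stride-2 convolution, so the conditioning image is
# downsampled by 2**(len(block_out_channels) - 1) == 8 with the default
# (16, 32, 96, 256) channels, matching the UNet latent resolution.
# `_cond_embedding_output_hw` is a hypothetical helper spelling that rule out.
def _cond_embedding_output_hw(height: int, width: int, num_transitions: int = 3) -> tuple:
    return height // (2 ** num_transitions), width // (2 ** num_transitions)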
@flax_register_to_config
class FlaxControlNetModel( nn.Module , FlaxModelMixin , ConfigMixin ):
"""simple docstring"""
a : int =32
a : int =4
a : Tuple[str] =(
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
a : Union[bool, Tuple[bool]] =False
a : Tuple[int] =(3_20, 6_40, 12_80, 12_80)
a : int =2
a : Union[int, Tuple[int]] =8
a : Optional[Union[int, Tuple[int]]] =None
a : int =12_80
a : float =0.0
a : bool =False
a : jnp.dtype =jnp.floataa
a : bool =True
a : int =0
a : str ="rgb"
a : Tuple[int] =(16, 32, 96, 2_56)
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCAmelCase : List[Any] = jnp.zeros(snake_case__ , dtype=jnp.floataa )
lowerCAmelCase : int = jnp.ones((1,) , dtype=jnp.intaa )
lowerCAmelCase : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCAmelCase : int = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCAmelCase : Union[str, Any] = jnp.zeros(snake_case__ , dtype=jnp.floataa )
lowerCAmelCase , lowerCAmelCase : Optional[Any] = jax.random.split(snake_case__ )
lowerCAmelCase : Union[str, Any] = {"params": params_rng, "dropout": dropout_rng}
return self.init(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )["params"]
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.block_out_channels
lowerCAmelCase : List[str] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCAmelCase : Optional[Any] = self.num_attention_heads or self.attention_head_dim
# input
lowerCAmelCase : Union[str, Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCAmelCase : Any = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCAmelCase : str = FlaxTimestepEmbedding(snake_case__ , dtype=self.dtype )
lowerCAmelCase : Union[str, Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowerCAmelCase : Any = self.only_cross_attention
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Dict = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCAmelCase : List[str] = []
lowerCAmelCase : str = []
lowerCAmelCase : int = block_out_channels[0]
lowerCAmelCase : str = nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCAmelCase : Dict = output_channel
lowerCAmelCase : Any = block_out_channels[i]
lowerCAmelCase : Tuple = i == len(snake_case__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCAmelCase : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowerCAmelCase : int = FlaxDownBlockaD(
in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case__ )
for _ in range(self.layers_per_block ):
lowerCAmelCase : Optional[int] = nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
if not is_final_block:
lowerCAmelCase : Dict = nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
lowerCAmelCase : str = down_blocks
lowerCAmelCase : Optional[int] = controlnet_down_blocks
# mid
lowerCAmelCase : Tuple = block_out_channels[-1]
lowerCAmelCase : Optional[int] = FlaxUNetMidBlockaDCrossAttn(
in_channels=snake_case__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowerCAmelCase : Tuple = nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1.0 , snake_case__ = True , snake_case__ = False , ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCAmelCase : Optional[int] = jnp.flip(snake_case__ , axis=1 )
# 1. time
if not isinstance(snake_case__ , jnp.ndarray ):
lowerCAmelCase : Union[str, Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(snake_case__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCAmelCase : str = timesteps.astype(dtype=jnp.floataa )
lowerCAmelCase : Optional[int] = jnp.expand_dims(snake_case__ , 0 )
lowerCAmelCase : Union[str, Any] = self.time_proj(snake_case__ )
lowerCAmelCase : Tuple = self.time_embedding(snake_case__ )
# 2. pre-process
lowerCAmelCase : Tuple = jnp.transpose(snake_case__ , (0, 2, 3, 1) )
lowerCAmelCase : Union[str, Any] = self.conv_in(snake_case__ )
lowerCAmelCase : List[Any] = jnp.transpose(snake_case__ , (0, 2, 3, 1) )
lowerCAmelCase : Optional[int] = self.controlnet_cond_embedding(snake_case__ )
sample += controlnet_cond
# 3. down
lowerCAmelCase : Union[str, Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase , lowerCAmelCase : Optional[Any] = down_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train )
else:
lowerCAmelCase , lowerCAmelCase : int = down_block(snake_case__ , snake_case__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCAmelCase : Union[str, Any] = self.mid_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train )
# 5. contronet blocks
lowerCAmelCase : Optional[int] = ()
for down_block_res_sample, controlnet_block in zip(snake_case__ , self.controlnet_down_blocks ):
lowerCAmelCase : Any = controlnet_block(snake_case__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCAmelCase : Dict = controlnet_down_block_res_samples
lowerCAmelCase : Optional[Any] = self.controlnet_mid_block(snake_case__ )
# 6. scaling
lowerCAmelCase : List[Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=snake_case__ , mid_block_res_sample=snake_case__ )
| 645 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=30 , snake_case__=400 , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=[0.5, 0.5, 0.5] , snake_case__=[0.5, 0.5, 0.5] , snake_case__=True , snake_case__=1 / 255 , snake_case__=True , ):
"""simple docstring"""
lowerCAmelCase : List[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
lowerCAmelCase : Union[str, Any] = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : Any = min_resolution
lowerCAmelCase : List[str] = max_resolution
lowerCAmelCase : Dict = do_resize
lowerCAmelCase : int = size
lowerCAmelCase : int = do_normalize
lowerCAmelCase : str = image_mean
lowerCAmelCase : Optional[Any] = image_std
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Optional[int] = rescale_factor
lowerCAmelCase : Union[str, Any] = do_pad
def lowercase__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase__ ( self , snake_case__ , snake_case__=False ):
"""simple docstring"""
if not batched:
lowerCAmelCase : Optional[Any] = image_inputs[0]
if isinstance(snake_case__ , Image.Image ):
lowerCAmelCase , lowerCAmelCase : int = image.size
else:
lowerCAmelCase , lowerCAmelCase : int = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase : Optional[Any] = int(self.size["shortest_edge"] * h / w )
lowerCAmelCase : int = self.size["shortest_edge"]
elif w > h:
lowerCAmelCase : Dict = self.size["shortest_edge"]
lowerCAmelCase : str = int(self.size["shortest_edge"] * w / h )
else:
lowerCAmelCase : Optional[int] = self.size["shortest_edge"]
lowerCAmelCase : Any = self.size["shortest_edge"]
else:
lowerCAmelCase : Union[str, Any] = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase : Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase : Dict = max(snake_case__ , key=lambda snake_case__ : item[0] )[0]
lowerCAmelCase : int = max(snake_case__ , key=lambda snake_case__ : item[1] )[1]
return expected_height, expected_width
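# The expectations above follow DETR-style shortest-edge resizing. A minimal
# standalone sketch of that rule (hypothetical helper, for illustration only;
# it ignores the longest-edge cap, which these test resolutions never reach):
def _shortest_edge_resize(height: int, width: int, shortest_edge: int = 18) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge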
@require_torch
@require_vision
class DeformableDetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = DeformableDetrImageProcessingTester(self )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "do_rescale" ) )
self.assertTrue(hasattr(snake_case__ , "do_pad" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , snake_case__ )
lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=snake_case__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
lowerCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
lowerCAmelCase : List[Any] = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCAmelCase , lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : Dict = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : Dict = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
lowerCAmelCase , lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowerCAmelCase : List[Any] = json.loads(f.read() )
lowerCAmelCase : List[str] = {"image_id": 39_769, "annotations": target}
# encode them
lowerCAmelCase : Optional[Any] = DeformableDetrImageProcessor()
lowerCAmelCase : Dict = image_processing(images=snake_case__ , annotations=snake_case__ , return_tensors="pt" )
# verify pixel values
lowerCAmelCase : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , snake_case__ )
lowerCAmelCase : Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , snake_case__ , atol=1e-4 ) )
# verify area
lowerCAmelCase : str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , snake_case__ ) )
# verify boxes
lowerCAmelCase : str = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , snake_case__ )
lowerCAmelCase : Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , snake_case__ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : Union[str, Any] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , snake_case__ ) )
# verify is_crowd
lowerCAmelCase : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , snake_case__ ) )
# verify class_labels
lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , snake_case__ ) )
# verify orig_size
lowerCAmelCase : List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , snake_case__ ) )
# verify size
lowerCAmelCase : Any = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , snake_case__ ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowerCAmelCase : Optional[int] = json.loads(f.read() )
lowerCAmelCase : Tuple = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
lowerCAmelCase : Tuple = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowerCAmelCase : str = DeformableDetrImageProcessor(format="coco_panoptic" )
lowerCAmelCase : Dict = image_processing(images=snake_case__ , annotations=snake_case__ , masks_path=snake_case__ , return_tensors="pt" )
# verify pixel values
lowerCAmelCase : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , snake_case__ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , snake_case__ , atol=1e-4 ) )
# verify area
lowerCAmelCase : int = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , snake_case__ ) )
# verify boxes
lowerCAmelCase : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , snake_case__ )
lowerCAmelCase : Optional[int] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , snake_case__ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : int = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , snake_case__ ) )
# verify is_crowd
lowerCAmelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , snake_case__ ) )
# verify class_labels
lowerCAmelCase : List[str] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , snake_case__ ) )
# verify masks
lowerCAmelCase : List[Any] = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , snake_case__ )
# verify orig_size
lowerCAmelCase : Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , snake_case__ ) )
# verify size
lowerCAmelCase : Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , snake_case__ ) )
| 645 | 1 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline( ChunkPipeline ):
"""simple docstring"""
def __init__( self , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
requires_backends(self , 'vision' )
requires_backends(self , 'torch' )
if self.framework != "pt":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(_lowerCAmelCase )
def lowercase_ ( self , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = {}
__lowerCamelCase = {}
__lowerCamelCase = {}
# preprocess args
if "points_per_batch" in kwargs:
__lowerCamelCase = kwargs['points_per_batch']
if "points_per_crop" in kwargs:
__lowerCamelCase = kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
__lowerCamelCase = kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
__lowerCamelCase = kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
__lowerCamelCase = kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
__lowerCamelCase = kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
__lowerCamelCase = kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
__lowerCamelCase = kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
__lowerCamelCase = kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
__lowerCamelCase = kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
__lowerCamelCase = kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
__lowerCamelCase = kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , lowerCamelCase__ , *lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ) -> int:
'''simple docstring'''
return super().__call__(_lowerCAmelCase , *_lowerCAmelCase , num_workers=_lowerCAmelCase , batch_size=_lowerCAmelCase , **_lowerCAmelCase )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=64 , lowerCamelCase__ = 0 , lowerCamelCase__ = 512 / 1_500 , lowerCamelCase__ = 32 , lowerCamelCase__ = 1 , ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = load_image(_lowerCAmelCase )
__lowerCamelCase = self.image_processor.size['longest_edge']
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.image_processor.generate_crop_boxes(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowerCamelCase = self.image_processor(images=_lowerCAmelCase , return_tensors='pt' )
with self.device_placement():
if self.framework == "pt":
__lowerCamelCase = self.get_inference_context()
with inference_context():
__lowerCamelCase = self._ensure_tensor_on_device(_lowerCAmelCase , device=self.device )
__lowerCamelCase = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
__lowerCamelCase = image_embeddings
__lowerCamelCase = grid_points.shape[1]
__lowerCamelCase = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
'To return all points at once, set points_per_batch to None' )
for i in range(0 , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCamelCase = grid_points[:, i : i + points_per_batch, :, :]
__lowerCamelCase = input_labels[:, i : i + points_per_batch]
__lowerCamelCase = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=0.88 , lowerCamelCase__=0.95 , lowerCamelCase__=0 , lowerCamelCase__=1 , ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = model_inputs.pop('input_boxes' )
__lowerCamelCase = model_inputs.pop('is_last' )
__lowerCamelCase = model_inputs.pop('original_sizes' ).tolist()
__lowerCamelCase = model_inputs.pop('reshaped_input_sizes' ).tolist()
__lowerCamelCase = self.model(**_lowerCAmelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
__lowerCamelCase = model_outputs['pred_masks']
__lowerCamelCase = self.image_processor.post_process_masks(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , binarize=_lowerCAmelCase )
__lowerCamelCase = model_outputs['iou_scores']
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=0.7 , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = []
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores' ) )
all_masks.extend(model_output.pop('masks' ) )
all_boxes.append(model_output.pop('boxes' ) )
__lowerCamelCase = torch.cat(_lowerCAmelCase )
__lowerCamelCase = torch.cat(_lowerCAmelCase )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.image_processor.post_process_for_mask_generation(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowerCamelCase = defaultdict(_lowerCAmelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(_lowerCAmelCase )
__lowerCamelCase = {}
if output_rle_mask:
__lowerCamelCase = rle_mask
if output_bboxes_mask:
__lowerCamelCase = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
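# A minimal sketch of the point-batching rule used in `preprocess` above
# (illustrative, outside the pipeline class): grid points are sliced along the
# second axis into chunks of `points_per_batch`, and the chunk starting at
# n_points - points_per_batch is flagged as the last one.
def _batch_point_ranges(n_points: int, points_per_batch: int) -> list:
    # Returns (start, stop, is_last) index triples, one per chunk.
    return [
        (i, i + points_per_batch, i == n_points - points_per_batch)
        for i in range(0, n_points, points_per_batch)
    ]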
| 709 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
@property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet' , )
return unet
@property
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
return unet
def lowercase_ ( self , lowerCamelCase__=False ) -> Dict:
'''simple docstring'''
if class_cond:
__lowerCamelCase = self.dummy_cond_unet
else:
__lowerCamelCase = self.dummy_uncond_unet
# Default to CM multistep sampler
__lowerCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
__lowerCamelCase = {
'unet': unet,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=0 ) -> Optional[Any]:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith('mps' ):
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__lowerCamelCase = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = ConsistencyModelPipeline(**lowerCamelCase__ )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components(class_cond=lowerCamelCase__ )
__lowerCamelCase = ConsistencyModelPipeline(**lowerCamelCase__ )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
__lowerCamelCase = 0
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = ConsistencyModelPipeline(**lowerCamelCase__ )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
__lowerCamelCase = 1
__lowerCamelCase = None
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components(class_cond=lowerCamelCase__ )
__lowerCamelCase = ConsistencyModelPipeline(**lowerCamelCase__ )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
__lowerCamelCase = 1
__lowerCamelCase = None
__lowerCamelCase = 0
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self , lowerCamelCase__=0 , lowerCamelCase__=False , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=(1, 3, 64, 64) ) -> int:
'''simple docstring'''
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
__lowerCamelCase = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
__lowerCamelCase = self.get_fixed_latents(seed=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ , shape=lowerCamelCase__ )
__lowerCamelCase = latents
return inputs
def lowercase_ ( self , lowerCamelCase__=0 , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=(1, 3, 64, 64) ) -> Optional[int]:
'''simple docstring'''
if type(lowerCamelCase__ ) == str:
__lowerCamelCase = torch.device(lowerCamelCase__ )
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__lowerCamelCase = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ )
return latents
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
__lowerCamelCase = ConsistencyModelPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
pipe.to(torch_device=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
__lowerCamelCase = ConsistencyModelPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
pipe.to(torch_device=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_inputs()
__lowerCamelCase = 1
__lowerCamelCase = None
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
__lowerCamelCase = ConsistencyModelPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
pipe.to(torch_device=lowerCamelCase__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_inputs(get_fixed_latents=lowerCamelCase__ , device=lowerCamelCase__ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCamelCase__ , enable_math=lowerCamelCase__ , enable_mem_efficient=lowerCamelCase__ ):
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
__lowerCamelCase = ConsistencyModelPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
pipe.to(torch_device=lowerCamelCase__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_inputs(get_fixed_latents=lowerCamelCase__ , device=lowerCamelCase__ )
__lowerCamelCase = 1
__lowerCamelCase = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCamelCase__ , enable_math=lowerCamelCase__ , enable_mem_efficient=lowerCamelCase__ ):
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 167 | 0 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_re_single_line_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def find_backend(line):
    """simple docstring"""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
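# A quick sanity sketch for the helper above (illustrative, not part of the
# original script): an availability guard naming several backends should
# collapse into a single "_and_"-joined key.
assert find_backend('''    if not (is_torch_available() and is_flax_available()):''') == "torch_and_flax"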
def read_init():
    """simple docstring"""
    with open(os.path.join(PATH_TO_DIFFUSERS, '''__init__.py'''), '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith('''else:'''):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', '''))
                elif line.startswith(''' ''' * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
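# Quick illustration (assumed example) of the dispatch above: an all-uppercase
# name renders the constant stub, an all-lowercase name the function stub, and a
# mixed-case class name the metaclass=DummyObject shell.
assert "metaclass=DummyObject" in create_dummy_object('''UNet2DModel''', '''["torch"]''')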
def create_dummy_files(backend_specific_objects=None):
    """simple docstring"""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '''[''' + ''', '''.join(f'"{b}"' for b in backend.split('''_and_''')) + ''']'''
        dummy_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    """simple docstring"""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'''torch''': '''pt'''}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, '''utils''')
    dummy_file_paths = {
        backend: os.path.join(path, F"""dummy_{short_names.get(backend, backend)}_objects.py""")
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, '''r''', encoding='''utf-8''', newline='''\n''') as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''''''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F"""Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main """
                    '''__init__ has new objects.''')
                with open(dummy_file_paths[backend], '''w''', encoding='''utf-8''', newline='''\n''') as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    '''The main __init__ has objects that are not present in '''
                    F"""diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` """
                    '''to fix this.''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 14 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst):
    return choice(lst)

def kth_number(lst: list[int], k: int) -> int:
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
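# Illustrative usage (not in the original file): the 3rd smallest element of
# [2, 1, 3, 4, 5] is 3, whichever pivots the randomized recursion happens to pick.
assert kth_number([2, 1, 3, 4, 5], 3) == 3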
if __name__ == "__main__":
import doctest
doctest.testmod()
| 133 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple('''CoinsDistribResult''', '''moves excess''')
def distribute_coins(root: TreeNode | None) -> int:
if root is None:
return 0
# Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data
    if count_nodes(root) != count_coins(root):
        raise ValueError('''The number of nodes should be the same as the number of coins''')
# Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves, result_excess)
    return get_distrib(root)[0]
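# Worked example (illustrative, not from the original module): a root holding 3
# coins with two empty children needs two moves, pushing one coin to each child.
_example_root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(_example_root) == 2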
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)

dataset = make_dataset()

def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)

def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)

def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
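# Quick agreement check between the two strategies above (illustrative values,
# not from the original file): both should report the same sorted triplet.
assert triplet_sum1([13, 29, 7, 23, 5], 35) == (5, 7, 23)
assert triplet_sum2([13, 29, 7, 23, 5], 35) == (5, 7, 23)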
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
| 475 | 0 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
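# A tiny self-check of the helper above (illustrative, assumed shapes): for
# `emb_1` of shape (n, d) and `emb_2` of shape (m, d) it returns an (n, m)
# matrix of cosine similarities, so parallel rows give values of ~1.0.
if __name__ == "__main__":
    _e = jnp.array([[3.0, 4.0], [0.6, 0.8]])
    assert jnp.allclose(jax_cosine_distance(_e, _e), 1.0, atol=1e-4)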
class FlaxStableDiffusionSafetyCheckerModule( nn.Module ):
_lowercase : CLIPConfig
_lowercase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = FlaxCLIPVisionModule(self.config.vision_config)
SCREAMING_SNAKE_CASE = nn.Dense(self.config.projection_dim , use_bias=a , dtype=self.dtype)
SCREAMING_SNAKE_CASE = self.param('concept_embeds' , jax.nn.initializers.ones , (17, self.config.projection_dim))
SCREAMING_SNAKE_CASE = self.param(
'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim))
SCREAMING_SNAKE_CASE = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (17,))
SCREAMING_SNAKE_CASE = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,))
def __call__( self , a) -> Tuple:
SCREAMING_SNAKE_CASE = self.vision_model(a)[1]
SCREAMING_SNAKE_CASE = self.visual_projection(a)
SCREAMING_SNAKE_CASE = jax_cosine_distance(a , self.special_care_embeds)
SCREAMING_SNAKE_CASE = jax_cosine_distance(a , self.concept_embeds)
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
SCREAMING_SNAKE_CASE = jnp.round(a , 3)
SCREAMING_SNAKE_CASE = jnp.any(special_scores > 0 , axis=1 , keepdims=a)
# Use a lower threshold if an image has any special care concept
SCREAMING_SNAKE_CASE = is_special_care * 0.01
SCREAMING_SNAKE_CASE = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
SCREAMING_SNAKE_CASE = jnp.round(a , 3)
SCREAMING_SNAKE_CASE = jnp.any(concept_scores > 0 , axis=1)
return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker( FlaxPreTrainedModel ):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , a , a = None , a = 0 , a = jnp.floataa , a = True , **a , ) -> Dict:
if input_shape is None:
SCREAMING_SNAKE_CASE = (1, 224, 224, 3)
SCREAMING_SNAKE_CASE = self.module_class(config=a , dtype=a , **a)
super().__init__(a , a , input_shape=a , seed=a , dtype=a , _do_init=_do_init)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = None) -> FrozenDict:
# init input tensor
SCREAMING_SNAKE_CASE = jax.random.normal(a , a)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = jax.random.split(a)
SCREAMING_SNAKE_CASE = {'params': params_rng, 'dropout': dropout_rng}
SCREAMING_SNAKE_CASE = self.module.init(a , a)['params']
return random_params
def __call__( self , a , a = None , ) -> Optional[int]:
SCREAMING_SNAKE_CASE = jnp.transpose(a , (0, 2, 3, 1))
return self.module.apply(
{'params': params or self.params} , jnp.array(a , dtype=jnp.floataa) , rngs={} , )
| 73 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 15 | 0 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _lowerCAmelCase ( _a : Union[List, PIL.Image.Image, torch.Tensor] ) -> Tuple:
    warnings.warn(
        """The preprocess method is deprecated and will be removed in a future version. Please"""
        """ use VaeImageProcessor.preprocess instead""" , FutureWarning , )
if isinstance(_a , torch.Tensor ):
return image
elif isinstance(_a , PIL.Image.Image ):
lowerCAmelCase_ : Any = [image]
if isinstance(image[0] , PIL.Image.Image ):
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = image[0].size
lowerCAmelCase_ , lowerCAmelCase_ : Dict = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
lowerCAmelCase_ : Optional[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
lowerCAmelCase_ : Union[str, Any] = np.concatenate(_a , axis=0 )
lowerCAmelCase_ : List[Any] = np.array(_a ).astype(np.floataa ) / 255.0
lowerCAmelCase_ : int = image.transpose(0 , 3 , 1 , 2 )
lowerCAmelCase_ : Optional[int] = 2.0 * image - 1.0
lowerCAmelCase_ : str = torch.from_numpy(_a )
elif isinstance(image[0] , torch.Tensor ):
lowerCAmelCase_ : List[str] = torch.cat(_a , dim=0 )
return image
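# (Illustrative, not part of the pipeline.) The PIL branch above maps uint8
# pixels in [0, 255] onto float tensors in [-1.0, 1.0] after snapping H and W
# down to multiples of 8; `_pixel_to_model_range` is a hypothetical scalar
# version of that value mapping, for illustration only.
def _pixel_to_model_range(p: int) -> float:
    return 2.0 * (p / 255.0) - 1.0
assert _pixel_to_model_range(0) == -1.0 and _pixel_to_model_range(255) == 1.0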
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]) -> torch.Tensor:
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w , h = mask[0].size
        w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class RePaintPipeline(DiffusionPipeline ):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , image , mask_image , num_inference_steps = 250 , eta = 0.0 , jump_length = 10 , jump_n_sample = 10 , generator = None , output_type = "pil" , return_dict = True , ):
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator )}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 440 |
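# --- Illustrative addition: a hedged usage sketch for the inpainting pipeline above.
# In the public diffusers API the class is exposed as `RePaintPipeline`; the checkpoint
# name follows the RePaint docs, and `init_image` / `mask` are assumed to be PIL images
# of a size the UNet accepts (both are assumptions, not part of the sample).
import torch
from diffusers import RePaintPipeline, RePaintScheduler

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

generator = torch.Generator().manual_seed(0)
output = pipe(
    image=init_image,            # image whose masked region should be repainted
    mask_image=mask,             # see _preprocess_mask above for the 0/1 convention
    num_inference_steps=250,
    eta=0.0,
    jump_length=10,              # resampling schedule from the RePaint paper
    jump_n_sample=10,
    generator=generator,
)
inpainted = output.images[0]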
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput ):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin , ConfigMixin ):
    order = 2
@register_to_config
    def __init__( self , sigma_min: float = 0.02 , sigma_max: float = 100 , s_noise: float = 1.007 , s_churn: float = 80 , s_tmin: float = 0.05 , s_tmax: float = 50 , ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input( self , sample: torch.FloatTensor , timestep: Optional[int] = None ) -> torch.FloatTensor:
        return sample

    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )

    def add_noise_to_input( self , sample: torch.FloatTensor , sigma: float , generator: Optional[torch.Generator] = None ):
        if self.config.s_tmin <= sigma <= self.config.s_tmax:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step( self , model_output: torch.FloatTensor , sigma_hat: float , sigma_prev: float , sample_hat: torch.FloatTensor , return_dict: bool = True , ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def step_correct( self , model_output: torch.FloatTensor , sigma_hat: float , sigma_prev: float , sample_hat: torch.FloatTensor , sample_prev: torch.FloatTensor , derivative: torch.FloatTensor , return_dict: bool = True , ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def add_noise( self , original_samples , noise , timesteps ):
        raise NotImplementedError()
| 440 | 1 |
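# --- Illustrative addition: a self-contained numeric sketch of the stochastic step the
# scheduler above implements (Algorithm 2, Karras et al. 2022). The denoiser is a toy
# closed-form posterior mean for unit-Gaussian data, and the sigma schedule is a plain
# geometric interpolation — both simplifications, not the diffusers implementation.
import torch


def toy_denoiser(x: torch.Tensor, sigma: float) -> torch.Tensor:
    # E[x0 | x] for x = x0 + sigma * eps with x0 ~ N(0, I)
    return x / (1.0 + sigma**2)


sigma_max, sigma_min, num_steps = 80.0, 0.05, 50
sigmas = [sigma_max * (sigma_min / sigma_max) ** (i / (num_steps - 1)) for i in range(num_steps)]

sample = torch.randn(1, 3, 8, 8) * sigma_max
for i in range(num_steps - 1):
    sigma, sigma_prev = sigmas[i], sigmas[i + 1]
    gamma = min(80 / num_steps, 2**0.5 - 1)      # s_churn / N, capped as in add_noise_to_input
    eps = 1.007 * torch.randn_like(sample)       # s_noise * N(0, I)
    sigma_hat = sigma + gamma * sigma            # temporarily raise the noise level
    sample_hat = sample + (sigma_hat**2 - sigma**2) ** 0.5 * eps
    pred_x0 = toy_denoiser(sample_hat, sigma_hat)
    derivative = (sample_hat - pred_x0) / sigma_hat
    sample = sample_hat + (sigma_prev - sigma_hat) * derivative   # first-order (Euler) step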
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def SCREAMING_SNAKE_CASE ( *args , take_from : Optional[Union[Dict, Any]] = None , standard_warn : bool = True , stacklevel : int = 2 ) -> Union[str, Any]:
    '''simple docstring'''
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                F''' version {__version__} is >= {version_name}''' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values | 125 |
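# --- Illustrative addition: a hedged usage sketch for the deprecation helper above
# (named `deprecate` in diffusers itself). It assumes the module context of the sample,
# since the helper resolves `__version__` through a relative import.
def resize(image, new_size=None, **kwargs):
    # pop the old `size` kwarg if a caller still passes it, emitting a warning
    size = SCREAMING_SNAKE_CASE(("size", "2.0.0", "Please use `new_size` instead."), take_from=kwargs)
    return new_size if new_size is not None else size

# resize(img, size=(64, 64))  -> returns (64, 64) and warns that `size` is deprecated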
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput ):
    '''simple docstring'''
    latents: torch.FloatTensor


class VQModel(ModelMixin , ConfigMixin ):
    '''simple docstring'''
    @register_to_config
    def __init__( self , in_channels: int = 3 , out_channels: int = 3 , down_block_types: Tuple[str] = ("DownEncoderBlock2D",) , up_block_types: Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels: Tuple[int] = (64,) , layers_per_block: int = 1 , act_fn: str = "silu" , latent_channels: int = 3 , sample_size: int = 32 , num_vq_embeddings: int = 256 , norm_num_groups: int = 32 , vq_embed_dim: Optional[int] = None , scaling_factor: float = 0.18215 , norm_type: str = "group" , ) -> None:
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )

    @apply_forward_hook
    def encode( self , x: torch.FloatTensor , return_dict: bool = True ) -> VQEncoderOutput:
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )

    @apply_forward_hook
    def decode( self , h: torch.FloatTensor , force_not_quantize: bool = False , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == 'spatial' else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )

    def forward( self , sample: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec ) | 125 | 1 |
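# --- Illustrative addition: a hedged round-trip sketch for the VQ autoencoder above.
# The checkpoint name is the one used in the diffusers LDM examples and is only
# illustrative here.
import torch
from diffusers import VQModel

vqvae = VQModel.from_pretrained("CompVis/ldm-celebahq-256", subfolder="vqvae")
vqvae.eval()

x = torch.randn(1, 3, 256, 256)                 # fake image batch scaled to [-1, 1]
with torch.no_grad():
    latents = vqvae.encode(x).latents           # conv features before quantization
    recon = vqvae.decode(latents).sample        # quantize, post-quant conv, decode
print(recon.shape)                              # torch.Size([1, 3, 256, 256])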
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCAmelCase_ ( nn.Module ):
"""simple docstring"""
_lowerCAmelCase : int = 42
_lowerCAmelCase : Optional[Any] = 42
_lowerCAmelCase : Optional[Any] = 0.0
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Any = 1
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : List[str] = jnp.floataa
def snake_case ( self ):
"""simple docstring"""
snake_case = []
snake_case = []
for i in range(self.num_layers ):
snake_case = self.in_channels if i == 0 else self.out_channels
snake_case = FlaxResnetBlockaD(
in_channels=lowercase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_ )
snake_case = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowercase_ )
snake_case = resnets
snake_case = attentions
if self.add_downsample:
snake_case = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=True ):
"""simple docstring"""
snake_case = ()
for resnet, attn in zip(self.resnets , self.attentions ):
snake_case = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ )
snake_case = attn(lowercase_ , lowercase_ , deterministic=lowercase_ )
output_states += (hidden_states,)
if self.add_downsample:
snake_case = self.downsamplers_a(lowercase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCAmelCase_ ( nn.Module ):
"""simple docstring"""
_lowerCAmelCase : str = 42
_lowerCAmelCase : str = 42
_lowerCAmelCase : List[Any] = 0.0
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : List[str] = jnp.floataa
def snake_case ( self ):
"""simple docstring"""
snake_case = []
for i in range(self.num_layers ):
snake_case = self.in_channels if i == 0 else self.out_channels
snake_case = FlaxResnetBlockaD(
in_channels=lowercase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_ )
snake_case = resnets
if self.add_downsample:
snake_case = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=True ):
"""simple docstring"""
snake_case = ()
for resnet in self.resnets:
snake_case = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ )
output_states += (hidden_states,)
if self.add_downsample:
snake_case = self.downsamplers_a(lowercase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCAmelCase_ ( nn.Module ):
"""simple docstring"""
_lowerCAmelCase : Any = 42
_lowerCAmelCase : str = 42
_lowerCAmelCase : int = 42
_lowerCAmelCase : Optional[int] = 0.0
_lowerCAmelCase : Union[str, Any] = 1
_lowerCAmelCase : int = 1
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : Any = False
_lowerCAmelCase : List[str] = False
_lowerCAmelCase : Dict = False
_lowerCAmelCase : Any = jnp.floataa
def snake_case ( self ):
"""simple docstring"""
snake_case = []
snake_case = []
for i in range(self.num_layers ):
snake_case = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case = self.prev_output_channel if i == 0 else self.out_channels
snake_case = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_ )
snake_case = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowercase_ )
snake_case = resnets
snake_case = attentions
if self.add_upsample:
snake_case = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=True ):
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
snake_case = res_hidden_states_tuple[-1]
snake_case = res_hidden_states_tuple[:-1]
snake_case = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ )
snake_case = attn(lowercase_ , lowercase_ , deterministic=lowercase_ )
if self.add_upsample:
snake_case = self.upsamplers_a(lowercase_ )
return hidden_states
class lowerCAmelCase_ ( nn.Module ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = 42
_lowerCAmelCase : Dict = 42
_lowerCAmelCase : Optional[Any] = 42
_lowerCAmelCase : Tuple = 0.0
_lowerCAmelCase : List[Any] = 1
_lowerCAmelCase : Dict = True
_lowerCAmelCase : List[str] = jnp.floataa
def snake_case ( self ):
"""simple docstring"""
snake_case = []
for i in range(self.num_layers ):
snake_case = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case = self.prev_output_channel if i == 0 else self.out_channels
snake_case = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_ )
snake_case = resnets
if self.add_upsample:
snake_case = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=True ):
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
snake_case = res_hidden_states_tuple[-1]
snake_case = res_hidden_states_tuple[:-1]
snake_case = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ )
if self.add_upsample:
snake_case = self.upsamplers_a(lowercase_ )
return hidden_states
class lowerCAmelCase_ ( nn.Module ):
"""simple docstring"""
_lowerCAmelCase : str = 42
_lowerCAmelCase : Tuple = 0.0
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : int = False
_lowerCAmelCase : Dict = jnp.floataa
def snake_case ( self ):
"""simple docstring"""
snake_case = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
snake_case = []
for _ in range(self.num_layers ):
snake_case = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowercase_ )
snake_case = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_ )
snake_case = resnets
snake_case = attentions
def __call__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=True ):
"""simple docstring"""
snake_case = self.resnets[0](lowercase_ , lowercase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
snake_case = attn(lowercase_ , lowercase_ , deterministic=lowercase_ )
snake_case = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ )
return hidden_states
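# --- Illustrative addition: a toy Flax module showing the setup/__call__ pattern the
# blocks above follow. `TinyResBlock` is a hypothetical stand-in, not FlaxResnetBlock2D.
import jax
import jax.numpy as jnp
import flax.linen as nn


class TinyResBlock(nn.Module):
    out_channels: int

    def setup(self):
        self.conv = nn.Conv(self.out_channels, kernel_size=(3, 3), padding="SAME")

    def __call__(self, hidden_states):
        # residual connection around a single conv, mirroring the resnet blocks above
        return hidden_states + self.conv(nn.swish(hidden_states))


block = TinyResBlock(out_channels=32)
x = jnp.ones((1, 8, 8, 32))                     # Flax convolutions expect NHWC
params = block.init(jax.random.PRNGKey(0), x)
y = block.apply(params, x)
print(y.shape)                                  # (1, 8, 8, 32)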
| 708 | """simple docstring"""
def and_gate(input_a: int , input_b: int ) -> int:
    """simple docstring"""
    return int((input_a, input_b).count(0 ) == 0 )


def test_and_gate() -> None:
    """simple docstring"""
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 104 | 0 |
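# --- Illustrative addition: gates composed from the AND gate above, in the same style
# (a small sketch, not part of the original sample).
def not_gate(input_a: int) -> int:
    return int(input_a == 0)


def nand_gate(input_a: int, input_b: int) -> int:
    # NAND is the negation of AND; every other boolean gate can be built from it
    return not_gate(and_gate(input_a, input_b))


assert nand_gate(1, 1) == 0
assert nand_gate(1, 0) == 1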
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list , start: int | None = None , end: int | None = None ) -> None:
    """simple docstring"""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 244 |
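# --- Illustrative addition: usage sketch for slowsort above. The algorithm is
# deliberately pessimal ("multiply and surrender"), so keep inputs tiny.
data = [5, 2, 9, 1, 5, 6]
slowsort(data)              # sorts the whole list in place
print(data)                 # [1, 2, 5, 5, 6, 9]

data = [4, 3, 2, 1]
slowsort(data, 1, 3)        # only indices 1..3 are ordered
print(data)                 # [4, 1, 2, 3]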
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ :int = 16
a_ :Tuple = 32
def lowercase_ (A : Accelerator , A : int = 1_6 ):
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
snake_case__ : Tuple = load_dataset('glue' , 'mrpc' )
def tokenize_function(A : int ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case__ : Union[str, Any] = datasets.map(
A , batched=A , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ : List[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(A : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case__ : Any = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case__ : Tuple = 1_6
elif accelerator.mixed_precision != "no":
snake_case__ : Optional[int] = 8
else:
snake_case__ : Optional[Any] = None
return tokenizer.pad(
A , padding='longest' , max_length=A , pad_to_multiple_of=A , return_tensors='pt' , )
# Instantiate dataloaders.
snake_case__ : Union[str, Any] = DataLoader(
tokenized_datasets['train'] , shuffle=A , collate_fn=A , batch_size=A )
snake_case__ : int = DataLoader(
tokenized_datasets['validation'] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ :Tuple = mocked_dataloaders # noqa: F811
def lowercase_ (A : List[str] , A : Optional[Any] ):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , A ) == "1":
snake_case__ : int = 2
# Initialize accelerator
snake_case__ : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ : Any = config['lr']
snake_case__ : Dict = int(config['num_epochs'] )
snake_case__ : Tuple = int(config['seed'] )
snake_case__ : Any = int(config['batch_size'] )
snake_case__ : Optional[int] = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
snake_case__ : Dict = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case__ : int = batch_size // MAX_GPU_BATCH_SIZE
snake_case__ : List[Any] = MAX_GPU_BATCH_SIZE
set_seed(A )
snake_case__ , snake_case__ : str = get_dataloaders(A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case__ : Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
snake_case__ : int = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
snake_case__ : Any = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=1_0_0 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = accelerator.prepare(
A , A , A , A , A )
# Now we train the model
for epoch in range(A ):
model.train()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case__ : int = model(**A )
snake_case__ : Optional[Any] = outputs.loss
snake_case__ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
snake_case__ : Optional[Any] = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ : Optional[Any] = model(**A )
snake_case__ : Dict = outputs.logits.argmax(dim=-1 )
snake_case__ , snake_case__ : int = accelerator.gather((predictions, batch['labels']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(A ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
snake_case__ : Union[str, Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case__ : Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=A , references=A , )
snake_case__ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , A )
def lowercase_ ():
snake_case__ : Optional[Any] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=A , default=A , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
snake_case__ : str = parser.parse_args()
snake_case__ : Any = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(A , A )
if __name__ == "__main__":
main()
| 478 | 0 |
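# --- Illustrative addition: as the comment in the eval loop notes, the manual
# truncation of the last distributed batch can be replaced by
# `Accelerator.gather_for_metrics`. A condensed sketch, assuming the same `model`,
# `eval_dataloader`, `accelerator`, and `metric` objects as in the script above:
model.eval()
for batch in eval_dataloader:
    with torch.no_grad():
        outputs = model(**batch)
    predictions = outputs.logits.argmax(dim=-1)
    # gathers across processes and drops the duplicated samples of the final batch
    predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
    metric.add_batch(predictions=predictions, references=references)
print(metric.compute())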
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
'''simple docstring'''
def __init__( self : Optional[int], _lowerCamelCase : List[Any], _lowerCamelCase : Any=2, _lowerCamelCase : Any=8, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : List[str]=True, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Optional[Any]=99, _lowerCamelCase : List[Any]=16, _lowerCamelCase : int=5, _lowerCamelCase : List[str]=2, _lowerCamelCase : int=36, _lowerCamelCase : List[Any]="gelu", _lowerCamelCase : Union[str, Any]=0.0, _lowerCamelCase : Optional[Any]=0.0, _lowerCamelCase : List[Any]=5_12, _lowerCamelCase : Union[str, Any]=16, _lowerCamelCase : str=2, _lowerCamelCase : List[Any]=0.02, _lowerCamelCase : int=3, _lowerCamelCase : Any=4, _lowerCamelCase : Dict=None, ):
'''simple docstring'''
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_input_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = num_labels
__A = num_choices
__A = scope
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
__A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
__A = None
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size], self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
__A = ids_tensor([self.batch_size], self.num_choices )
__A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_lowerCamelCase, initializer_range=self.initializer_range, )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = self.get_config()
__A = 3_00
return config
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[str], _lowerCamelCase : List[Any], _lowerCamelCase : List[Any], _lowerCamelCase : int, _lowerCamelCase : Any, _lowerCamelCase : Optional[Any], _lowerCamelCase : Any ):
'''simple docstring'''
__A = MraModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase )
__A = model(_lowerCamelCase, token_type_ids=_lowerCamelCase )
__A = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Optional[Any], _lowerCamelCase : List[Any], _lowerCamelCase : List[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : Optional[int], _lowerCamelCase : Optional[Any], _lowerCamelCase : str, _lowerCamelCase : Any, _lowerCamelCase : List[str], ):
'''simple docstring'''
__A = True
__A = MraModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(
_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, encoder_hidden_states=_lowerCamelCase, encoder_attention_mask=_lowerCamelCase, )
__A = model(
_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, encoder_hidden_states=_lowerCamelCase, )
__A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : Dict, _lowerCamelCase : Dict, _lowerCamelCase : Any, _lowerCamelCase : Dict, _lowerCamelCase : int, _lowerCamelCase : List[Any], _lowerCamelCase : Tuple ):
'''simple docstring'''
__A = MraForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : List[str], _lowerCamelCase : Tuple, _lowerCamelCase : List[Any], _lowerCamelCase : Dict, _lowerCamelCase : int, _lowerCamelCase : Optional[int], _lowerCamelCase : Tuple ):
'''simple docstring'''
__A = MraForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(
_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, start_positions=_lowerCamelCase, end_positions=_lowerCamelCase, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : List[Any], _lowerCamelCase : List[str], _lowerCamelCase : Optional[int], _lowerCamelCase : Any, _lowerCamelCase : Optional[int], _lowerCamelCase : Optional[int], _lowerCamelCase : Optional[int] ):
'''simple docstring'''
__A = self.num_labels
__A = MraForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : str, _lowerCamelCase : List[Any], _lowerCamelCase : int, _lowerCamelCase : Optional[int], _lowerCamelCase : Optional[Any], _lowerCamelCase : List[Any], _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__A = self.num_labels
__A = MraForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Any, _lowerCamelCase : Optional[int], _lowerCamelCase : int, _lowerCamelCase : List[str], _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[str], _lowerCamelCase : Tuple ):
'''simple docstring'''
__A = self.num_choices
__A = MraForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
__A = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
__A = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
__A = model(
_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, labels=_lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : Any = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
A_ : str = False
A_ : str = False
A_ : int = False
A_ : Union[str, Any] = False
A_ : Any = ()
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
__A = MraModelTester(self )
__A = ConfigTester(self, config_class=_lowerCamelCase, hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A = type
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = MraModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@unittest.skip(reason='''MRA does not output attentions''' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
return
@require_torch
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
__A = torch.arange(2_56 ).unsqueeze(0 )
with torch.no_grad():
__A = model(_lowerCamelCase )[0]
__A = torch.Size((1, 2_56, 7_68) )
self.assertEqual(output.shape, _lowerCamelCase )
__A = torch.tensor(
[[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], _lowerCamelCase, atol=1e-4 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
__A = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
__A = torch.arange(2_56 ).unsqueeze(0 )
with torch.no_grad():
__A = model(_lowerCamelCase )[0]
__A = 5_02_65
__A = torch.Size((1, 2_56, vocab_size) )
self.assertEqual(output.shape, _lowerCamelCase )
__A = torch.tensor(
[[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], _lowerCamelCase, atol=1e-4 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
__A = torch.arange(40_96 ).unsqueeze(0 )
with torch.no_grad():
__A = model(_lowerCamelCase )[0]
__A = 5_02_65
__A = torch.Size((1, 40_96, vocab_size) )
self.assertEqual(output.shape, _lowerCamelCase )
__A = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], _lowerCamelCase, atol=1e-4 ) )
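# --- Illustrative addition: a standalone sketch mirroring the slow integration test
# above (requires network access to download the checkpoint).
import torch
from transformers import MraModel

model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
input_ids = torch.arange(256).unsqueeze(0)      # (1, 256) toy token ids
with torch.no_grad():
    hidden = model(input_ids)[0]
print(hidden.shape)                             # torch.Size([1, 256, 768]) per the test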
| 713 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def get_user_input():
    """simple docstring"""
    compute_environment = _ask_options(
        '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None ):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('''config''' , description=description )
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''' , description=description )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command(args ):
    """simple docstring"""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('''.json''' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(f'accelerate configuration saved at {config_file}' )
def main():
    """simple docstring"""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
| 215 | 0 |
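# --- Illustrative addition: driving the config command above programmatically instead
# of through `accelerate config` (a sketch; get_user_input() still prompts
# interactively, only the output path is pinned).
parser = config_command_parser()
args = parser.parse_args(["--config_file", "/tmp/accelerate_config.yaml"])
config_command(args)    # runs the prompts, then writes the YAML to the given path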
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def SCREAMING_SNAKE_CASE ( ):
lowercase = """https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"""
lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert("""RGB""" )
return image
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple ):
lowercase = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] ):
lowercase = dct.pop(lowercase_ )
lowercase = val
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : Optional[int] ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowercase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
lowercase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
lowercase = torch.cat((q_bias, torch.zeros_like(lowercase_ , requires_grad=lowercase_ ), v_bias) )
lowercase = qkv_bias
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] ):
lowercase = 364 if """coco""" in model_name else 224
lowercase = InstructBlipVisionConfig(image_size=lowercase_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
lowercase = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowercase = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
lowercase = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" , vocab_size=3_2001 ).to_dict()
elif "vicuna-13b" in model_name:
lowercase = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" , vocab_size=3_2001 ).to_dict()
else:
raise ValueError("""Model name not supported""" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
lowercase = InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict()
lowercase = InstructBlipConfig(vision_config=lowercase_ , text_config=lowercase_ , qformer_config=lowercase_ )
return config, image_size
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : List[Any]=None , lowercase_ : str=False ):
lowercase = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" )
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} )
if "t5" in model_name:
lowercase = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowercase = LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" )
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} )
lowercase , lowercase = get_blipa_config(lowercase_ )
lowercase = InstructBlipForConditionalGeneration(lowercase_ ).eval()
lowercase = {
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
lowercase , lowercase = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
lowercase = """cuda:1""" if torch.cuda.is_available() else """cpu"""
lowercase = """cuda:2""" if torch.cuda.is_available() else """cpu"""
lowercase , lowercase , lowercase = load_model_and_preprocess(
name=lowercase_ , model_type=lowercase_ , is_eval=lowercase_ , device=lowercase_ )
original_model.eval()
print("""Done!""" )
# update state dict keys
lowercase = original_model.state_dict()
lowercase = create_rename_keys(lowercase_ )
for src, dest in rename_keys:
rename_key(lowercase_ , lowercase_ , lowercase_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowercase = state_dict.pop(lowercase_ )
if key.startswith("""Qformer.bert""" ):
lowercase = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
lowercase = key.replace("""self""" , """attention""" )
if "llm_proj" in key:
lowercase = key.replace("""llm_proj""" , """language_projection""" )
if "t5_proj" in key:
lowercase = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""llm_model""" ):
lowercase = key.replace("""llm_model""" , """language_model""" )
if key.startswith("""t5""" ):
lowercase = key.replace("""t5""" , """language""" )
lowercase = val
# read in qv biases
read_in_q_v_bias(lowercase_ , lowercase_ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(lowercase_ , strict=lowercase_ )
lowercase = load_demo_image()
lowercase = """What is unusual about this image?"""
# create processor
lowercase = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=lowercase_ , image_std=lowercase_ )
lowercase = InstructBlipProcessor(
image_processor=lowercase_ , tokenizer=lowercase_ , qformer_tokenizer=lowercase_ , )
lowercase = processor(images=lowercase_ , text=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
# make sure processor creates exact same pixel values
lowercase = vis_processors["""eval"""](lowercase_ ).unsqueeze(0 ).to(lowercase_ )
lowercase = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , lowercase_ )
original_model.to(lowercase_ )
hf_model.to(lowercase_ )
with torch.no_grad():
if "vicuna" in model_name:
lowercase = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits
lowercase = hf_model(**lowercase_ ).logits
else:
lowercase = original_model(
{"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits
lowercase = tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(lowercase_ )
lowercase = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
lowercase = hf_model(**lowercase_ , labels=lowercase_ ).logits
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
lowercase = 1E-4 if """vicuna""" in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , lowercase_ , atol=lowercase_ )
print("""Looks ok!""" )
print("""Generating with original model...""" )
lowercase = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""" )
lowercase = hf_model.generate(
**lowercase_ , do_sample=lowercase_ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowercase = 2
print("""Original generation:""" , lowercase_ )
lowercase = processor.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
lowercase = [text.strip() for text in output_text]
print("""HF generation:""" , lowercase_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase_ )
hf_model.save_pretrained(lowercase_ )
if push_to_hub:
processor.push_to_hub(F"""Salesforce/{model_name}""" )
hf_model.push_to_hub(F"""Salesforce/{model_name}""" )
if __name__ == "__main__":
lowercase_ : str = argparse.ArgumentParser()
lowercase_ : Tuple = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
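# Illustrative invocation (the script's filename is assumed; it is not shown in this excerpt):
#   python convert_instructblip_original_to_pytorch.py --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl --push_to_hub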
| 588 |
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))
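# Quick sanity check (illustrative): all three implementations agree, e.g.
# sum_of_digits(-123) == sum_of_digits_recursion(-123) == sum_of_digits_compact(-123) == 6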
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 588 | 1 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs, )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
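# Minimal usage sketch (the "facebook/flava-full" checkpoint is this example's assumption):
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(images=pil_image, text=["a photo of a cat"], return_tensors="pt")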
| 62 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "roberta"

    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
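# Illustrative: for the default task the exported axes are dynamic in batch and sequence, i.e.
#   RobertaOnnxConfig(RobertaConfig()).inputs
#   -> OrderedDict([("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"})])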
| 62 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 556 |
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Exponential Linear Unit (ELU): returns x where x > 0, else alpha * (exp(x) - 1)."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
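# Illustrative values: exponential_linear_unit(np.array([-1.0, 0.0, 2.0]), 1.0)
# -> array([-0.63212056,  0.        ,  2.        ])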
if __name__ == "__main__":
import doctest
doctest.testmod()
| 556 | 1 |
snake_case = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """simple docstring"""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
snake_case = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 716 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
snake_case = """Create a default config file for Accelerate with only a few flags set."""
def lowerCamelCase__ ( lowercase="no" , lowercase = default_json_config_file , lowercase = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = Path(lowercase )
path.parent.mkdir(parents=lowercase , exist_ok=lowercase )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
SCREAMING_SNAKE_CASE : int = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.device_count()
SCREAMING_SNAKE_CASE : int = num_gpus
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if num_gpus > 1:
SCREAMING_SNAKE_CASE : Tuple = "MULTI_GPU"
else:
SCREAMING_SNAKE_CASE : Optional[Any] = "NO"
elif is_xpu_available() and use_xpu:
SCREAMING_SNAKE_CASE : List[str] = torch.xpu.device_count()
SCREAMING_SNAKE_CASE : str = num_xpus
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if num_xpus > 1:
SCREAMING_SNAKE_CASE : Any = "MULTI_XPU"
else:
SCREAMING_SNAKE_CASE : str = "NO"
elif is_npu_available():
SCREAMING_SNAKE_CASE : List[Any] = torch.npu.device_count()
SCREAMING_SNAKE_CASE : Optional[Any] = num_npus
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if num_npus > 1:
SCREAMING_SNAKE_CASE : str = "MULTI_NPU"
else:
SCREAMING_SNAKE_CASE : int = "NO"
else:
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : int = "NO"
SCREAMING_SNAKE_CASE : Dict = ClusterConfig(**lowercase )
config.to_json_file(lowercase )
return path
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = parser.add_parser("default" , parents=lowercase , help=lowercase , formatter_class=lowercase )
parser.add_argument(
"--config_file" , default=lowercase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , dest="save_location" , )
parser.add_argument(
"--mixed_precision" , choices=["no", "fp16", "bf16"] , type=lowercase , help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , )
parser.set_defaults(func=lowercase )
return parser
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'''accelerate configuration saved at {config_file}''' )
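# Illustrative CLI usage once registered under the `accelerate config` command group (wiring assumed):
#   accelerate config default --mixed_precision bf16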
| 488 | 0 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
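# Illustrative run: menu = build_menu(["Burger", "Pizza"], [80, 100], [40, 10]);
# greedy(menu, 60, Things.get_value) keeps both items and returns a total value of 180.0.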
if __name__ == "__main__":
import doctest
doctest.testmod()
| 100 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    '''simple docstring'''

    mode = "token-classification"

    def __init__(self, hparams):
        '''simple docstring'''
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            token_classification_task = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")
        self.token_classification_task: TokenClassificationTask = token_classification_task
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        '''simple docstring'''
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        '''simple docstring'''
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        '''simple docstring'''
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples, self.labels, args.max_seq_length, self.tokenizer, cls_token_at_end=bool(self.config.model_type in ["xlnet"]), cls_token=self.tokenizer.cls_token, cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0, sep_token=self.tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(self.config.model_type in ["xlnet"]), pad_token=self.tokenizer.pad_token_id, pad_token_segment_id=self.tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        '''simple docstring'''
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size)

    def validation_step(self, batch, batch_nb):
        """Compute validation"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        '''simple docstring'''
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        '''simple docstring'''
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        '''simple docstring'''
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        '''simple docstring'''
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)")
        parser.add_argument(
            "--max_seq_length", default=128, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--labels", default="", type=str, help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.", )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none", )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
| 100 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
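# Minimal usage sketch (checkpoint name taken from the pretrained map above):
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tokenizer("Hello world")["input_ids"]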
| 716 | """simple docstring"""
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
    '''simple docstring'''

    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ]
        )
        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True, ):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return Transformer2DModelOutput(sample=output_states)
| 595 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    """simple docstring"""

    @property
    def dummy_uncond_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model

    def test_inference(self):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def test_sde_ve_pipeline(self):
        '''simple docstring'''
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 | 534 | def text_justification(word: str, max_width: int) -> list:
"""simple docstring"""
__lowercase = word.split()
def justify(lowercase , lowercase , lowercase ) -> str:
__lowercase = max_width - width
__lowercase = len(lowercase )
if len(lowercase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
__lowercase = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
__lowercase = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
__lowercase = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(lowercase ):
num_spaces_between_words_list[i] += 1
__lowercase = []
for i in range(lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(lowercase )
__lowercase = []
__lowercase = []
__lowercase = 0
for word in words:
if width + len(lowercase ) + len(lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(lowercase )
width += len(lowercase )
else:
# justify the line and add it to result
answer.append(justify(lowercase , lowercase , lowercase ) )
# reset new line and new width
__lowercase , __lowercase = [word], len(lowercase )
__lowercase = max_width - width - len(lowercase )
answer.append(''' '''.join(lowercase ) + (remaining_spaces + 1) * ''' ''' )
return answer
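# Illustrative: text_justification("This is an example of text justification.", 16)
# -> ['This    is    an', 'example  of text', 'justification.  ']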
if __name__ == "__main__":
from doctest import testmod
testmod() | 534 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 327 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "efficientformer"

    def __init__(self, depths: List[int] = [3, 2, 6, 4], hidden_sizes: List[int] = [48, 96, 224, 448], downsamples: List[bool] = [True, True, True, True], dim: int = 448, key_dim: int = 32, attention_ratio: int = 4, resolution: int = 7, num_hidden_layers: int = 5, num_attention_heads: int = 8, mlp_expansion_ratio: int = 4, hidden_dropout_prob: float = 0.0, patch_size: int = 16, num_channels: int = 3, pool_size: int = 3, downsample_patch_size: int = 3, downsample_stride: int = 2, downsample_pad: int = 1, drop_path_rate: float = 0.0, num_meta3d_blocks: int = 1, distillation: bool = True, use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, hidden_act: str = "gelu", initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 224, batch_norm_eps: float = 1e-05, **kwargs, ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 327 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 56 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 38 | 0 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculation of the Easter date for the given year, following Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
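# Illustrative check: gauss_easter(2023) -> datetime(2023, 4, 9, 0, 0), i.e. Easter Sunday 2023.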
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
__snake_case = """will be""" if year > datetime.now().year else """was"""
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 400 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculation of the Easter date for the given year, following Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
__snake_case = """will be""" if year > datetime.now().year else """was"""
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 400 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    '''simple docstring'''

    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        '''simple docstring'''
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
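# Usage sketch (the `dataset` variable is assumed): given dataset.features with a ClassLabel
# "labels" column, ImageClassification().align_with_features(dataset.features) returns a copy
# of the template whose label schema is pinned to that ClassLabel.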
| 74 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """simple docstring"""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        '''simple docstring'''
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        '''simple docstring'''
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
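# End-user path (illustrative): this builder is what backs e.g.
#   datasets.load_dataset("parquet", data_files={"train": "train.parquet"})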
| 74 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )


@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."})
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."}, )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ''' --overwrite_output_dir to overcome.''' )

    module = import_module('''tasks''' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
            f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )

    # Set seed
    set_seed(training_args.seed )

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map = dict(enumerate(labels ) )
    num_labels = len(labels )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions , label_ids ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    def compute_metrics(p ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": fa_score(out_label_list , preds_list ),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )

            results.update(result )

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )

        predictions, label_ids, metrics = trainer.predict(test_dataset )
        preds_list, _ = align_predictions(predictions , label_ids )

        output_test_results_file = os.path.join(training_args.output_dir , '''test_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , '''w''' ) as writer:
                for key, value in metrics.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , '''w''' ) as writer:
                with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )

    return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
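
# --- Illustrative sketch (added; not part of the original script) ---
# align_predictions above turns (batch, seq_len, num_labels) logits into
# per-token label names, skipping gold positions equal to CrossEntropyLoss's
# ignore_index (-100). A self-contained version with a hypothetical label map
# (kept as comments so nothing runs at import time):
#
#   import numpy as np
#
#   logits = np.array([[[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]]])  # 1 sequence, 3 tokens
#   gold = np.array([[0, 1, -100]])                            # last token is padding
#   label_map = {0: "O", 1: "B-PER"}
#   preds = np.argmax(logits, axis=2)
#   pairs = [(label_map[g], label_map[p]) for g, p in zip(gold[0], preds[0]) if g != -100]
#   # pairs == [("O", "O"), ("B-PER", "B-PER")]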
| 215 |
"""simple docstring"""
def jaro_winkler(stra , strb ):
    """simple docstring"""

    def get_matched_characters(_stra , _strb ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = f'{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}'
        return "".join(matched )

    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
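
    # Quick self-check (added; values follow from the definition above:
    # 'hello' vs 'world' share one matched character 'l' and no common prefix):
    assert jaro_winkler('hello', 'hello') == 1.0
    assert abs(jaro_winkler('hello', 'world') - 0.4666666666666666) < 1e-9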
| 215 | 1 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int] ) -> list[int]:  # This function is recursive
    """simple docstring"""
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
        # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
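
    # --- Illustrative alternative (added; not part of the original file) ---
    # The recursive search above can take exponential time in the worst case.
    # The classic patience-sorting approach computes the *length* of a longest
    # non-decreasing subsequence in O(n log n):
    from bisect import bisect_right

    def longest_subsequence_length(array: list[int]) -> int:
        tails: list[int] = []  # tails[k] = smallest tail among subsequences of length k + 1
        for element in array:
            pos = bisect_right(tails, element)
            if pos == len(tails):
                tails.append(element)
            else:
                tails[pos] = element
        return len(tails)

    assert longest_subsequence_length([10, 22, 9, 33, 21, 50, 41, 60, 80]) == 6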
| 94 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x ):
    """simple docstring"""
    return x + 2


class UpperCAmelCase_ ( unittest.TestCase ):
    """simple docstring"""

    def test_evaluate_assign(self ):
        '''simple docstring'''
        code = '''x = 3'''
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {'''x''': 3} )

        code = '''x = y'''
        state = {'''y''': 5}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {'''x''': 5, '''y''': 5} )

    def test_evaluate_call(self ):
        '''simple docstring'''
        code = '''y = add_two(x)'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''y''': 5} )

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code , {} , state=state )
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self ):
        '''simple docstring'''
        code = '''x = 3'''
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {'''x''': 3} )

    def test_evaluate_dict(self ):
        '''simple docstring'''
        code = '''test_dict = {\'x\': x, \'y\': add_two(x)}'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        self.assertDictEqual(result , {'''x''': 3, '''y''': 5} )
        self.assertDictEqual(state , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )

    def test_evaluate_expression(self ):
        '''simple docstring'''
        code = '''x = 3\ny = 5'''
        state = {}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''y''': 5} )

    def test_evaluate_f_string(self ):
        '''simple docstring'''
        code = '''text = f\'This is x: {x}.\''''
        state = {'''x''': 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state , {'''x''': 3, '''text''': '''This is x: 3.'''} )

    def test_evaluate_if(self ):
        '''simple docstring'''
        code = '''if x <= 3:\n y = 2\nelse:\n y = 5'''
        state = {'''x''': 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state , {'''x''': 3, '''y''': 2} )

        state = {'''x''': 8}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {'''x''': 8, '''y''': 5} )

    def test_evaluate_list(self ):
        '''simple docstring'''
        code = '''test_list = [x, add_two(x)]'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        self.assertListEqual(result , [3, 5] )
        self.assertDictEqual(state , {'''x''': 3, '''test_list''': [3, 5]} )

    def test_evaluate_name(self ):
        '''simple docstring'''
        code = '''y = x'''
        state = {'''x''': 3}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {'''x''': 3, '''y''': 3} )

    def test_evaluate_subscript(self ):
        '''simple docstring'''
        code = '''test_list = [x, add_two(x)]\ntest_list[1]'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''test_list''': [3, 5]} )

        code = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )

    def test_evaluate_for(self ):
        '''simple docstring'''
        code = '''x = 0\nfor i in range(3):\n x = i'''
        state = {}
        result = evaluate(code , {'''range''': range} , state=state )
        assert result == 2
        self.assertDictEqual(state , {'''x''': 2, '''i''': 2} )
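
# --- Toy illustration (added; this is NOT the actual transformers interpreter) ---
# The tests above pin down a contract: statements run against a `state` dict,
# only whitelisted tools are callable, and the value of the last assignment is
# returned. A deliberately tiny ast-based sketch of that contract:
import ast


def tiny_evaluate(code, tools, state):
    result = None
    env = {**tools, **state}

    def run_expr(expr_node):
        expr = ast.Expression(body=expr_node)
        ast.fix_missing_locations(expr)
        return eval(compile(expr, '<expr>', 'eval'), {'__builtins__': {}}, env)

    for node in ast.parse(code).body:
        if isinstance(node, ast.Assign):  # e.g. "x = 3"
            value = run_expr(node.value)
            env[node.targets[0].id] = value
            state[node.targets[0].id] = value
            result = value
        elif isinstance(node, ast.Expr):  # bare expression, e.g. "test_list[1]"
            result = run_expr(node.value)
    return result


_tiny_state = {}
assert tiny_evaluate('x = 3', {}, _tiny_state) == 3 and _tiny_state == {'x': 3}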
| 94 | 1 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class A__ ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__(self , image_processor , tokenizer , qformer_tokenizer ):
        '''simple docstring'''
        super().__init__(image_processor , tokenizer )

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchFeature:
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.' )

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids' )
            encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask' )

        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )

        return encoding
    def batch_decode(self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def save_pretrained(self , save_directory , **kwargs ):
        '''simple docstring'''
        if os.path.isfile(save_directory ):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , 'qformer_tokenizer' )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )
@classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder='qformer_tokenizer' )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
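
# --- Usage sketch (added; the checkpoint id below is a placeholder assumption) ---
# The processor wraps an image processor, a main tokenizer, and a second
# Q-Former tokenizer whose ids/mask come back as qformer_input_ids /
# qformer_attention_mask alongside the usual fields:
#
#   from PIL import Image
#
#   processor = A__.from_pretrained('some-org/some-instructblip-checkpoint')
#   inputs = processor(images=Image.new('RGB', (224, 224)), text='describe the image', return_tensors='pt')
#   sorted(inputs.keys())
#   # ['attention_mask', 'input_ids', 'pixel_values',
#   #  'qformer_attention_mask', 'qformer_input_ids']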
| 706 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
    @property
    def dummy_uncond_unet(self ):
        '''simple docstring'''
        unet = UNet2DModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet

    @property
    def dummy_cond_unet(self ):
        '''simple docstring'''
        unet = UNet2DModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet
    def get_dummy_components(self , class_cond=False ):
        '''simple docstring'''
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
    def test_consistency_model_pipeline_multistep(self ):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def test_consistency_model_pipeline_multistep_class_cond(self ):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        inputs['class_labels'] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def test_consistency_model_pipeline_onestep(self ):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def test_consistency_model_pipeline_onestep_class_cond(self ):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        inputs['class_labels'] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
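
    # --- Note (added, illustrative) ---
    # The four tests above differ only in their inputs: "multistep" relies on
    # the get_dummy_inputs defaults (timesteps=[22, 0], num_inference_steps=None),
    # "onestep" overrides them with num_inference_steps=1 and timesteps=None, and
    # the class-conditional variants additionally set inputs['class_labels'] = 0.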
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    def tearDown(self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self , seed=0 , get_fixed_latents=False , device="cpu" , dtype=torch.float32 , shape=(1, 3, 64, 64) ):
        '''simple docstring'''
        generator = torch.manual_seed(seed )
        inputs = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed , device=device , dtype=dtype , shape=shape )
            inputs['latents'] = latents
        return inputs
    def get_fixed_latents(self , seed=0 , device="cpu" , dtype=torch.float32 , shape=(1, 3, 64, 64) ):
        '''simple docstring'''
        if type(device ) == str:
            device = torch.device(device )
        generator = torch.Generator(device=device ).manual_seed(seed )
        latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        return latents
    def test_consistency_model_cd_multistep(self ):
        '''simple docstring'''
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    def test_consistency_model_cd_onestep(self ):
        '''simple docstring'''
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_inputs()
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self ):
        '''simple docstring'''
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self ):
        '''simple docstring'''
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 69 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class _lowercase ( unittest.TestCase ):
    def __init__(self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size_divisor=3_2 , do_rescale=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class _lowercase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self ):
        self.image_processor_tester = GLPNImageProcessingTester(self )

    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size_divisor""" ) )
        self.assertTrue(hasattr(image_processing , """resample""" ) )
        self.assertTrue(hasattr(image_processing , """do_rescale""" ) )

    def test_batch_feature(self ):
        pass

    def test_call_pil(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    def test_call_numpy(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    def test_call_pytorch(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
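
# --- Illustrative sketch (added; not part of the original tests) ---
# The assertions above only check that height/width come out as multiples of
# size_divisor; a floor-to-multiple rounding rule of this shape satisfies them:
def _round_down_to_multiple(value: int, divisor: int) -> int:
    return (value // divisor) * divisor


assert _round_down_to_multiple(403, 32) == 384  # 403 -> 12 * 32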
| 385 |
"""simple docstring"""
from math import sqrt
def is_prime(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status


def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2

    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def greatest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def is_even(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0


def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans


def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1


def kg_v(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans


def get_prime(n):
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1, p_number_2):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
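
# --- Quick demo of the helpers above (added; safe to delete) ---
if __name__ == "__main__":
    assert is_prime(97) and not is_prime(96)
    assert prime_factorization(180) == [2, 2, 3, 3, 5]
    assert gcd(12, 18) == 6 and kg_v(12, 18) == 36
    assert goldbach(28) == [5, 23]
    assert is_perfect_number(28)
    assert simplify_fraction(10, 20) == (1, 2)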
| 123 | 0 |
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    '''simple docstring'''
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    '''simple docstring'''
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list: list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
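
    # --- Added note (illustrative): the recursion does the same O(n^2) work as
    # iterative insertion sort. rec_insertion_sort(a, n) first lets insert_next
    # repair out-of-order neighbours starting at position n - 1, then recurses
    # on the first n - 1 elements. Quick self-check:
    _demo = [5, 3, 1, 4, 2]
    rec_insertion_sort(_demo, len(_demo))
    assert _demo == [1, 2, 3, 4, 5]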
| 693 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
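
# --- Added note (illustrative) ---
# With the lazy structure above, `from transformers.models.poolformer import
# PoolFormerModel` does not pull in the torch-heavy modeling module until the
# attribute is actually accessed; the TYPE_CHECKING branch exists so static
# type checkers still see the real symbols.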
| 693 | 1 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class UpperCamelCase_ ( datasets.BuilderConfig ):
"""simple docstring"""
A = None
def _generate_iterable_examples( df: "pyspark.sql.DataFrame" , partition_order: List[int] , ):
'''simple docstring'''
import pyspark
def generate_fn():
        df_with_partition_id = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("""*""" ).where(f'''part_id = {partition_id}''' ).drop("""part_id""" )
            rows = partition_df.collect()
            row_id = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    """simple docstring"""
    def __init__( self , df , partition_order=None , ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ):
yield from self.generate_examples_fn()
    def shuffle_data_sources( self , generator ):
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    def shard_data_sources( self , worker_id , num_workers ):
        partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    @property
    def n_shards( self ):
        return len(self.partition_order )
class Spark(datasets.DatasetBuilder):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__( self , df , cache_dir=None , working_dir=None , **config_kwargs , ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **config_kwargs , )
    def _validate_cache_dir( self ):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=UpperCAmelCase )
            probe_file = os.path.join(self._cache_dir , """fs_test""" + uuid.uuid4().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
            )
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
    def _info( self ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self , max_shard_size ):
        import pyspark

        def get_arrow_batch_size(it ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
self.df.limit(UpperCAmelCase )
.repartition(1 )
.mapInArrow(UpperCAmelCase , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
        approx_total_size = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
    def _prepare_split_single( self , fpath , file_format , max_shard_size , ):
        import pyspark

        writer_class = ParquetWriter if file_format == """parquet""" else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == """parquet"""

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it , None )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
            shard_id = 0
            writer = writer_class(
                features=features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
                table = pa.Table.from_batches([batch] )
                writer.write_table(table )
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath ) ):
                    dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
                    shutil.move(file , dest )
        stats = (
            self.df.mapInArrow(write_arrow , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split( self , split_generator , file_format="""arrow""" , max_shard_size=None , num_proc=None , **kwargs , ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(max_shard_size )
        is_local = not is_remote_filesystem(self._fs )
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = """-TTTTT-SSSSS-of-NNNNN"""
        fname = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        fpath = path_join(self._output_dir , fname )

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCAmelCase )
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
            fs = self._fs
# use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id , shard_id , global_shard_id , ):
                rename(
                    fs , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards ) ):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args: _rename_shard(*args ) ).collect()
else:
# don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(SUFFIX , """""" ) , )
    def _get_examples_iterable_for_split( self , split_generator , ):
        return SparkExamplesIterable(self.df )
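
# --- Usage sketch (added; assumes pyspark is installed and a session can start) ---
# This builder backs the datasets library's Dataset.from_spark entry point:
#
#   from pyspark.sql import SparkSession
#   from datasets import Dataset
#
#   spark = SparkSession.builder.master('local[*]').getOrCreate()
#   df = spark.createDataFrame([{'text': 'hello'}, {'text': 'world'}])
#   ds = Dataset.from_spark(df, cache_dir='/tmp/datasets_cache')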
| 479 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_data2vec_audio'] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure['modeling_data2vec_text'] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure['modeling_data2vec_vision'] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_data2vec_vision'] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 479 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
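    # Note (added for clarity, not in the original test): MBart shifts every raw
    # SentencePiece id by `tokenizer.fairseq_offset` (typically 1) so the lowest ids
    # stay reserved for fairseq's <s>/<pad>/</s>/<unk> specials; that is why the
    # expected ids above are written as `value + tokenizer.fairseq_offset`.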
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
# Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
# Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
# Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
# Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
# Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")
        self.assertEqual(
            nested_simplify(inputs), {
# A, test, EOS, en_XX
'''input_ids''': [[62, 30_34, 2, 25_00_04]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
            }, )

| 702 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
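# Example (illustrative, not in the original script):
#   rename_key("encoder.layers.0.blocks.0.attn.proj.weight")
#   -> "swin.encoder.layers.0.blocks.0.attention.output.dense.weight"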
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
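# Example invocation (illustrative; the script name and checkpoint path are placeholders):
#   python convert_swin_simmim.py --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-converted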
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)

| 678 | 0 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
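# Usage sketch (illustrative, not part of the original file):
#
#   text_config = AlignTextConfig(hidden_size=768)
#   vision_config = AlignVisionConfig(image_size=600)
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.to_dict()["model_type"] == "align"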
| 411 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
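# Quick sanity check (illustrative, not part of the original file):
#
#   config = FalconConfig()  # the defaults above mirror tiiuae/falcon-7b
#   assert config.head_dim == 4544 // 71
#   assert config.rotary     # rotary embeddings are used whenever alibi is False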
| 411 | 1 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 3_2,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_0_0_0,
"block_out_channels": [3_2, 6_4],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 6_4,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_0_0_0,
"block_out_channels": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"attention_head_dim": 6_4,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 2_5_6,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"attention_head_dim": 6_4,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 4_0,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 2_0_1,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 1_5_1,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
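# Examples (illustrative): str2bool("Yes") -> True, str2bool("0") -> False,
# and str2bool("maybe") raises argparse.ArgumentTypeError.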
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
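# Note (added for clarity): the original checkpoints store Q, K and V as one fused 1x1
# convolution ("qkv"). Chunking its weight along dim 0 and squeezing the trailing
# spatial dimensions yields the three separate linear projections diffusers expects.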
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
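# Note (added for clarity): `current_layer` starts at 1 on the down path because
# input_blocks.0 is the stem convolution, and it is reset to 0 before the up path
# because output_blocks are numbered independently in the original layout.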
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()

    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
print(F"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 508 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics(split, metrics, output_dir):
logger.info(f"***** {split} metrics *****" )
for key in sorted(metrics.keys() ):
logger.info(f" {key} = {metrics[key]}" )
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
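# Example (illustrative): handle_metrics("val", {"val_loss": 1.23, "val_bleu": 27.1}, "out/")
# logs both keys and writes them to out/val_results.json.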
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))
    tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )
# use task specific params
    use_task_specific_params(model, data_args.task)
# set num_beams for evaluation
if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
# set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
if model_args.freeze_embeds:
        freeze_embeds(model)
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )
# Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir ,'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)
if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
if training_args.do_predict:
logger.info('*** Predict ***' )
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)
if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))
if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
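# Example invocation (illustrative; paths and hyperparameters are placeholders):
#   python finetune_trainer.py --model_name_or_path facebook/bart-base \
#       --data_dir ./wmt_en_ro --task translation --output_dir ./out \
#       --do_train --do_eval --predict_with_generate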
| 508 | 1 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1, )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None, )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None, )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1, )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels, )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
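# Shape sketch (illustrative, not part of the original file): with the defaults above,
#
#   enc = Encoder()                            # double_z=True -> 2 * out_channels
#   dec = Decoder()
#   moments = enc(torch.randn(1, 3, 64, 64))   # -> (1, 6, 64, 64): mean and logvar stacked
#   recon = dec(moments[:, :3])                # -> (1, 3, 64, 64)
#
# A single down block means no downsampling, so the spatial size is preserved.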
class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape
    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
def lowercase_ ( self : List[Any] , __snake_case : List[str] ):
a : str = inds.shape
assert len(__snake_case ) > 1
a : List[Any] = inds.reshape(ishape[0] , -1 )
a : Dict = self.used.to(__snake_case )
if self.re_embed > self.used.shape[0]: # extra token
a : Optional[Any] = 0 # simply set to zero
a : Optional[int] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __snake_case )
return back.reshape(__snake_case )
def lowercase_ ( self : Tuple , __snake_case : Dict ):
# reshape z -> (batch, height, width, channel) and flatten
a : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
a : List[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
a : Union[str, Any] = torch.argmin(torch.cdist(__snake_case , self.embedding.weight ) , dim=1 )
a : int = self.embedding(__snake_case ).view(z.shape )
a : str = None
a : Optional[int] = None
# compute loss for embedding
if not self.legacy:
a : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
a : Dict = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
a : Dict = z + (z_q - z).detach()
# reshape back to match original input shape
a : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
a : List[str] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
a : List[Any] = self.remap_to_used(__snake_case )
a : int = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
a : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowercase_ ( self : str , __snake_case : Optional[int] , __snake_case : Optional[int] ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
a : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
a : Tuple = self.unmap_to_all(__snake_case )
a : Optional[int] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
a : Union[str, Any] = self.embedding(__snake_case )
if shape is not None:
a : int = z_q.view(__snake_case )
# reshape back to match original input shape
a : List[str] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
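
# --- Editor's aside (added, demo only): `z_q = z + (z_q - z).detach()` in
# forward() above is the straight-through estimator — gradients flow back to
# the encoder output z as if quantization were the identity. Tiny check:
import torch

_z = torch.randn(4, 8, requires_grad=True)
_z_q = torch.round(_z)             # stand-in for the codebook lookup
_z_st = _z + (_z_q - _z).detach()  # straight-through copy
_z_st.sum().backward()
assert torch.allclose(_z.grad, torch.ones_like(_z))
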
class DiagonalGaussianDistribution(object ):
    def __init__( self , parameters , deterministic=False ):
        self.parameters = parameters
        self.mean , self.logvar = torch.chunk(parameters , 2 , dim=1 )
        self.logvar = torch.clamp(self.logvar , -30.0 , 20.0 )
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar )
        self.var = torch.exp(self.logvar )
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )

    def sample( self , generator : Optional[torch.Generator] = None ):
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape , generator=generator , device=self.parameters.device , dtype=self.parameters.dtype )
        x = self.mean + self.std * sample
        return x

    def kl( self , other=None ):
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )

    def nll( self , sample , dims=[1, 2, 3] ):
        if self.deterministic:
            return torch.Tensor([0.0] )
        logtwopi = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=dims )

    def mode( self ):
return self.mean | 526 |
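
# --- Editor's aside (added, demo only): kl() above uses the closed form
# KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * (mu^2 + sigma^2 - 1 - log sigma^2).
# Quick numerical cross-check against torch.distributions:
import torch
from torch.distributions import Normal, kl_divergence

_mu = torch.tensor([0.3, -1.2, 0.0])
_logvar = torch.tensor([0.5, -0.4, 0.1])
_closed_form = 0.5 * (_mu.pow(2) + _logvar.exp() - 1.0 - _logvar)
_reference = kl_divergence(Normal(_mu, torch.exp(0.5 * _logvar)), Normal(torch.zeros(3), torch.ones(3)))
assert torch.allclose(_closed_form, _reference, atol=1e-6)
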
'''simple docstring'''
from __future__ import annotations
__author__ = 'Muhammad Umer Farooq'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = 'Muhammad Umer Farooq'
__email__ = '[email protected]'
__status__ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser ):
    def __init__( self , domain: str ):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag( self , tag: str , attrs: list[tuple[str, str | None]] ):
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )


def get_domain_name( url: str ) -> str:
    return ".".join(get_sub_domain_name(url ).split('.' )[-2:] )


def get_sub_domain_name( url: str ) -> str:
    return parse.urlparse(url ).netloc


def emails_from_url( url: str = "https://github.com" ) -> list[str]:
    domain = get_domain_name(url )

    # Initialize the parser
    parser = Parser(domain )

    try:
        # Open URL
        r = requests.get(url )

        # pass the raw HTML to the parser to get links
        parser.feed(r.text )

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )


if __name__ == "__main__":
    emails = emails_from_url('https://github.com')
print(F"{len(emails)} emails found:")
print('\n'.join(sorted(emails))) | 526 | 1 |
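
# --- Editor's aside (added, demo only): behaviour of the two stdlib helpers
# the crawler above relies on:
from urllib import parse

assert parse.urlparse("https://github.com/about").netloc == "github.com"
assert parse.urljoin("https://github.com", "/pricing") == "https://github.com/pricing"
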
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve( class_prompt , class_data_dir , num_class_images ):
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 )

    os.makedirs(f"{class_data_dir}/images" , exist_ok=True )
    if len(list(Path(f"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 , )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images" , total=num_class_images )

    with open(f"{class_data_dir}/caption.txt" , "w" ) as f1, open(f"{class_data_dir}/urls.txt" , "w" ) as f2, open(
        f"{class_data_dir}/images.txt" , "w" ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"] )
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content ) )
                    with open(f"{class_data_dir}/images/{total}.jpg" , "wb" ) as f:
                        f.write(img.content )
                    f1.write(images["caption"] + "\n" )
                    f2.write(images["url"] + "\n" )
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("" , add_help=False )
    parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=True , type=str )
    parser.add_argument("--class_data_dir" , help="path to save images" , required=True , type=str )
    parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=int )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 351 |
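
# --- Editor's aside (added, demo only): the while-loop in retrieve() above
# widens the requested hit count by `factor` until the index returns enough
# results. A dependency-free model of that strategy; `fake_query` stands in
# for ClipClient.query and every name here is a demo assumption:
def _widen_until_enough(fake_query, needed, factor=1.5, start=100, cap=10_000):
    n = start
    while True:
        hits = fake_query(n)
        if len(hits) >= factor * needed or n > cap:
            return hits
        n = int(factor * n)

assert len(_widen_until_enough(lambda n: list(range(min(n, 700))), 400)) >= 400
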
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class __snake_case ( unittest.TestCase):
'''simple docstring'''
def _a ( self ):
a__ = tempfile.mkdtemp()
# fmt: off
a__ = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
a__ = dict(zip(a_ , range(len(a_ ) ) ) )
a__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
a__ = {"""unk_token""": """<unk>"""}
a__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
a__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
a__ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
a__ = os.path.join(self.tmpdirname , a_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(a_ , a_ )
def _a ( self , **a_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **a_ )
def _a ( self , **a_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **a_ )
def _a ( self , **a_ ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **a_ )
def _a ( self ):
shutil.rmtree(self.tmpdirname )
def _a ( self ):
        a__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
a__ = [Image.fromarray(np.moveaxis(a_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self ):
a__ = self.get_tokenizer()
a__ = self.get_rust_tokenizer()
a__ = self.get_image_processor()
a__ = OwlViTProcessor(tokenizer=a_ , image_processor=a_ )
processor_slow.save_pretrained(self.tmpdirname )
a__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=a_ )
a__ = OwlViTProcessor(tokenizer=a_ , image_processor=a_ )
processor_fast.save_pretrained(self.tmpdirname )
a__ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , a_ )
self.assertIsInstance(processor_fast.tokenizer , a_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , a_ )
self.assertIsInstance(processor_fast.image_processor , a_ )
def _a ( self ):
a__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
a__ = self.get_image_processor(do_normalize=a_ )
a__ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=a_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a_ )
def _a ( self ):
a__ = self.get_image_processor()
a__ = self.get_tokenizer()
a__ = OwlViTProcessor(tokenizer=a_ , image_processor=a_ )
a__ = self.prepare_image_inputs()
a__ = image_processor(a_ , return_tensors="""np""" )
a__ = processor(images=a_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ):
a__ = self.get_image_processor()
a__ = self.get_tokenizer()
a__ = OwlViTProcessor(tokenizer=a_ , image_processor=a_ )
a__ = """lower newer"""
a__ = processor(text=a_ , return_tensors="""np""" )
a__ = tokenizer(a_ , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _a ( self ):
a__ = self.get_image_processor()
a__ = self.get_tokenizer()
a__ = OwlViTProcessor(tokenizer=a_ , image_processor=a_ )
a__ = """lower newer"""
a__ = self.prepare_image_inputs()
a__ = processor(text=a_ , images=a_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def _a ( self ):
a__ = """google/owlvit-base-patch32"""
a__ = OwlViTProcessor.from_pretrained(a_ )
a__ = ["""cat""", """nasa badge"""]
a__ = processor(text=a_ )
a__ = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def _a ( self ):
a__ = """google/owlvit-base-patch32"""
a__ = OwlViTProcessor.from_pretrained(a_ )
a__ = [["""cat""", """nasa badge"""], ["""person"""]]
a__ = processor(text=a_ )
a__ = 16
a__ = len(a_ )
a__ = max([len(a_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def _a ( self ):
a__ = """google/owlvit-base-patch32"""
a__ = OwlViTProcessor.from_pretrained(a_ )
a__ = ["""cat""", """nasa badge"""]
a__ = processor(text=a_ )
a__ = 16
a__ = inputs["""input_ids"""]
a__ = [
[49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _a ( self ):
a__ = self.get_image_processor()
a__ = self.get_tokenizer()
a__ = OwlViTProcessor(tokenizer=a_ , image_processor=a_ )
a__ = self.prepare_image_inputs()
a__ = self.prepare_image_inputs()
a__ = processor(images=a_ , query_images=a_ )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def _a ( self ):
a__ = self.get_image_processor()
a__ = self.get_tokenizer()
a__ = OwlViTProcessor(tokenizer=a_ , image_processor=a_ )
a__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ = processor.batch_decode(a_ )
a__ = tokenizer.batch_decode(a_ )
self.assertListEqual(a_ , a_ )
| 351 | 1 |
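
# --- Editor's aside (added, demo only): prepare_image_inputs above moves the
# channel axis first->last so PIL can consume the array. Shape check:
import numpy as np

_chw = np.zeros((3, 30, 400), dtype=np.uint8)
assert np.moveaxis(_chw, 0, -1).shape == (30, 400, 3)
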
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = ["ConditionalDetrFeatureExtractor"]
UpperCAmelCase_ : Any = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 120 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longt5'''] = [
        '''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''LongT5EncoderModel''',
        '''LongT5ForConditionalGeneration''',
        '''LongT5Model''',
        '''LongT5PreTrainedModel''',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
        '''FlaxLongT5ForConditionalGeneration''',
        '''FlaxLongT5Model''',
        '''FlaxLongT5PreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 677 | 0 |
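
# --- Editor's aside (added, demo only): _LazyModule above defers the heavy
# submodule imports until an attribute is first accessed. A self-contained
# miniature of the same idea; names here are illustrative, not the
# transformers implementation:
import importlib
import types

class _LazyDemoModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported name to the submodule that defines it
        self._name_to_module = {n: m for m, names in import_structure.items() for n in names}

    def __getattr__(self, name):
        module = importlib.import_module(self._name_to_module[name])  # imported on first use
        value = getattr(module, name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value

_lazy_math = _LazyDemoModule("lazy_math", {"math": ["sqrt", "pi"]})
assert _lazy_math.sqrt(9) == 3.0
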
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , 'Tatoeba directory does not exist.' )
class TatoebaConversionTester(unittest.TestCase ):
    @cached_property
    def resolver( self ):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )

    @slow
    def test_resolver( self ):
        self.resolver.convert_models(["heb-eng"] )

    @slow
    def test_model_card( self ):
        model_card , mmeta = self.resolver.write_model_card("opus-mt-he-en" , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 461 | '''simple docstring'''
def solution( n : int = 600_851_475_143 ) -> int:
    '''Returns the largest prime factor of n, found by trial division.'''
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 461 | 1 |
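
# --- Editor's aside (added, demo only): cross-check of the trial-division
# result above. 600_851_475_143 factors as 71 * 839 * 1471 * 6857, so its
# largest prime factor is 6857.
assert 71 * 839 * 1471 * 6857 == 600_851_475_143
assert solution(13_195) == 29  # worked example from the Project Euler statement
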
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset ):
    # Dummy iterable dataset that yields increasing ints and stops at random (or at max_length).
    def __init__( self , p_stop=0.01 , max_length=1_0_0_0 ):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__( self ):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase ):
    def check_batch_sampler_shards( self , batch_sampler , expected , split_batches=False , even_batches=True ):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists , expected )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
# Check the shards when the dataset is a round multiple of total batch size.
A_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=a__ )
A_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(a__ , a__ )
A_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
A_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=a__ )
A_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
A_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=a__ )
A_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
A_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=a__ )
A_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
A_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=a__ )
A_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
A_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=a__ )
A_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
A_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=a__ )
A_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
A_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
A_ = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
A_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
A_ = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
# Check the shards when the dataset is a round multiple of batch size.
A_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=a__ )
A_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
A_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
A_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=a__ )
A_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
A_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=a__ )
A_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
A_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=a__ )
A_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
A_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=a__ )
A_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
A_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
A_ = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
A_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
A_ = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
# Check the shards when the dataset is a round multiple of total batch size.
A_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=a__ )
A_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
A_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
A_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=a__ )
A_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
A_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=a__ )
A_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
A_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=a__ )
A_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
A_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=a__ )
A_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
A_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=a__ )
A_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
A_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=a__ )
A_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
A_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
A_ = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
A_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
A_ = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
# Check the shards when the dataset is a round multiple of batch size.
A_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=a__ )
A_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
A_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
A_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=a__ )
A_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
A_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=a__ )
A_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
A_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=a__ )
A_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
A_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=a__ )
A_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
A_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
A_ = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
A_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
A_ = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
A_ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
A_ = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] )
def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__=False , a__=2 , a__=False ) -> Union[str, Any]:
'''simple docstring'''
random.seed(a__ )
A_ = list(a__ )
A_ = [
IterableDatasetShard(
a__ , batch_size=a__ , drop_last=a__ , num_processes=a__ , process_index=a__ , split_batches=a__ , )
for i in range(a__ )
]
A_ = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(a__ )
iterable_dataset_lists.append(list(a__ ) )
A_ = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
A_ = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(a__ ) , len(a__ ) )
self.assertTrue(len(a__ ) % shard_batch_size == 0 )
A_ = []
for idx in range(0 , len(a__ ) , a__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(a__ ) < len(a__ ):
reference += reference
self.assertListEqual(a__ , reference[: len(a__ )] )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = 4_2
A_ = RandomIterableDataset()
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
# Edge case with a very small dataset
A_ = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
A_ = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=a__ )
A_ = SkipBatchSampler(a__ , 2 )
self.assertListEqual(list(a__ ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
A_ = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = DataLoader(list(range(1_6 ) ) , batch_size=4 )
A_ = skip_first_batches(a__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
A_ = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
Accelerator()
A_ = DataLoaderDispatcher(range(1_6 ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) | 141 |
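
# --- Editor's aside (added, demo only): the SkipBatchSampler expectation
# tested above is simply "drop the first k batches of the wrapped sampler":
from torch.utils.data import BatchSampler

_batches = list(BatchSampler(range(16), batch_size=4, drop_last=False))
assert _batches[2:] == [[8, 9, 10, 11], [12, 13, 14, 15]]
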
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies( url : str = "" ) -> dict[str, float]:
    url = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    titles = soup.find_all('''td''' , attrs='''titleColumn''' )
    ratings = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }


def write_movies( filename : str = "IMDb_Top_250_Movies.csv" ) -> None:
    movies = get_imdb_top_250_movies()
    with open(filename , '''w''' , newline='''''' ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(['''Movie title''', '''IMDb rating'''] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies() | 141 | 1 |
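
# --- Editor's aside (added, demo only): the exact CSV layout written above,
# checked in memory:
import csv
import io

_buf = io.StringIO()
_writer = csv.writer(_buf)
_writer.writerow(["Movie title", "IMDb rating"])
_writer.writerow(["The Shawshank Redemption", 9.2])
assert _buf.getvalue().splitlines() == ["Movie title,IMDb rating", "The Shawshank Redemption,9.2"]
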
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool ):
    default_checkpoint = 'philschmid/bart-large-cnn-samsum'
    description = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    name = 'summarizer'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ['text']
    outputs = ['text']

    def encode( self , text ):
        return self.pre_processor(text , return_tensors='''pt''' , truncation=True )

    def forward( self , inputs ):
        return self.model.generate(**inputs )[0]

    def decode( self , outputs ):
        return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True )
| 721 |
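
# --- Editor's aside (added, not part of the original module): the
# encode/forward/decode triplet above is what the agent runtime calls; the
# same checkpoint can be driven directly through the public pipeline API
# (needs network access, hence left commented out):
#
#   from transformers import pipeline
#   summarizer = pipeline("summarization", model="philschmid/bart-large-cnn-samsum")
#   print(summarizer("Long meeting transcript ...")[0]["summary_text"])
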
import requests
from bs4 import BeautifulSoup


def stock_price( symbol : str = "AAPL" ) -> str:
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 249 | 0 |
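
# --- Editor's aside (added, demo only): the format specs used above pad to
# fixed widths, `<4` left-aligned and `>8` right-aligned:
assert f"{'AAPL':<4}|{'189.70':>8}" == "AAPL|  189.70"
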
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    '''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
    '''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm'''] = [
        '''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLMForMultipleChoice''',
        '''XLMForQuestionAnswering''',
        '''XLMForQuestionAnsweringSimple''',
        '''XLMForSequenceClassification''',
        '''XLMForTokenClassification''',
        '''XLMModel''',
        '''XLMPreTrainedModel''',
        '''XLMWithLMHeadModel''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlm'''] = [
        '''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXLMForMultipleChoice''',
        '''TFXLMForQuestionAnsweringSimple''',
        '''TFXLMForSequenceClassification''',
        '''TFXLMForTokenClassification''',
        '''TFXLMMainLayer''',
        '''TFXLMModel''',
        '''TFXLMPreTrainedModel''',
        '''TFXLMWithLMHeadModel''',
    ]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 | """simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_UpperCamelCase = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor ):
    '''Feature extractor for CLAP: log-mel spectrograms with "fusion" or random-truncation handling of long audio.'''

    model_input_names = ['input_features', 'is_longer']

    def __init__( self , feature_size=64 , sampling_rate=48_000 , hop_length=480 , max_length_s=10 , fft_window_size=1_024 , padding_value=0.0 , return_attention_mask=False , frequency_min = 0 , frequency_max = 14_000 , top_db = None , truncation = "fusion" , padding = "repeatpad" , **kwargs , ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale="""htk""" , )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["""feature_extractor_type"""] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features( self , waveform , mel_filters = None ):
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel="""dB""" , )
        return log_mel_spectrogram.T
    def _random_mel_fusion( self , mel , total_frames , chunk_frames ):
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
    def _get_input_mel( self , waveform , max_length , truncation , padding ):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]

        return input_mel, longer
    def __call__( self , raw_speech , truncation = None , padding = None , max_length = None , sampling_rate = None , return_tensors = None , **kwargs , ):
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )

        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )

        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float64 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float64 )

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )

        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True

        if isinstance(input_mel[0] , List ):
            input_mel = [np.asarray(feature , dtype=np.float64 ) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"""input_features""": input_mel, """is_longer""": is_longer}
        input_features = BatchFeature(input_features )

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )

        return input_features
| 363 | 0 |
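
# --- Editor's aside (added, demo only): with the defaults above (48 kHz, hop
# length 480, 10 s max), the "fusion" branch expects
# max_length // hop_length + 1 frames per clip:
_sampling_rate, _hop_length, _max_length_s = 48_000, 480, 10
assert (_max_length_s * _sampling_rate) // _hop_length + 1 == 1001
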
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = 'pytorch_model.bin'
WEIGHTS_INDEX_NAME = 'pytorch_model.bin.index.json'
ADAPTER_CONFIG_NAME = 'adapter_config.json'
ADAPTER_WEIGHTS_NAME = 'adapter_model.bin'
ADAPTER_SAFE_WEIGHTS_NAME = 'adapter_model.safetensors'
TF2_WEIGHTS_NAME = 'tf_model.h5'
TF2_WEIGHTS_INDEX_NAME = 'tf_model.h5.index.json'
TF_WEIGHTS_NAME = 'model.ckpt'
FLAX_WEIGHTS_NAME = 'flax_model.msgpack'
FLAX_WEIGHTS_INDEX_NAME = 'flax_model.msgpack.index.json'
SAFE_WEIGHTS_NAME = 'model.safetensors'
SAFE_WEIGHTS_INDEX_NAME = 'model.safetensors.index.json'
CONFIG_NAME = 'config.json'
FEATURE_EXTRACTOR_NAME = 'preprocessor_config.json'
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = 'generation_config.json'
MODEL_CARD_NAME = 'modelcard.json'

SENTENCEPIECE_UNDERLINE = '▁'
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version( min_version ):
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                'This example requires a source install from HuggingFace Transformers (see '
                '`https://huggingface.co/docs/transformers/installation#install-from-source`),'
            )
        else:
            error_message = F"""This example requires a minimum version of {min_version},"""
        error_message += F""" but the version found is {__version__}.\n"""
        raise ImportError(
            error_message
            + 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
            'versions of HuggingFace Transformers.' )
| 421 |
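
# --- Editor's aside (added, not part of the original module): example scripts
# call the guard defined above like this; it raises ImportError when the
# installed build is older than the requested minimum:
#
#   from transformers.utils import check_min_version
#   check_min_version("4.31.0")
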
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size( features: Features ) -> Optional[int]:
    # Shrink the Parquet row-group size when heavy feature types are present,
    # so that random access into the written file stays cheap.
    batch_size = np.inf

    def set_batch_size(feature: FeatureType ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )

    _visit(features , set_batch_size )

    return None if batch_size is np.inf else batch_size
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
    def __init__( self , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs , ) -> None:
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['parquet'][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )
    def lowercase ( self ) -> Dataset:
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter :
    """simple docstring"""
    def __init__( self , dataset : Dataset , path_or_buf : Union[PathLike, BinaryIO] , batch_size : Optional[int] = None , **parquet_writer_kwargs , ) -> None:
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write ( self ) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , 'wb+' ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written
    def _write ( self , file_obj : BinaryIO , batch_size : int , **parquet_writer_kwargs ) -> int:
        """Writes the dataset as a Parquet binary stream and returns the number of bytes written."""
        written = 0
        _ = parquet_writer_kwargs.pop('path_or_buf' , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
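# Illustrative sketch (not part of the original file): round-tripping a small
# in-memory dataset through the writer class above; the output path is arbitrary.
def _demo_parquet_writer(path='demo.parquet' ):
    ds = Dataset.from_dict({'text': ['a', 'b', 'c']} )
    n_bytes = ParquetDatasetWriter(ds , path ).write()
    print(F'''wrote {n_bytes} bytes to {path}''' )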
| 421 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType ( enum.Enum ):
    """Possible output formats of the text generation pipeline."""
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowercase ( Pipeline ):
    """simple docstring"""
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params , forward_params , _ = self._sanitize_parameters(prefix=prefix , **self._forward_params )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ):
        '''simple docstring'''
        preprocess_params = {}
        if prefix is not None:
            preprocess_params['prefix'] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix , padding=False , add_special_tokens=False , return_tensors=self.framework )
            generate_kwargs['prefix_length'] = prefix_inputs['input_ids'].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    ' [None, \'hole\']' )
            preprocess_params['handle_long_generation'] = handle_long_generation
        preprocess_params.update(generate_kwargs )
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
            if return_tensors is not None:
                raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params['return_type'] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.' )
            generate_kwargs['eos_token_id'] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize( self , *args , **kwargs ):
        '''simple docstring'''
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'add_space_before_punct_symbol': True} )
        return super()._parse_and_tokenize(*args , **kwargs )
    def __call__( self , text_inputs , **kwargs ):
        '''simple docstring'''
        return super().__call__(text_inputs , **kwargs )
    def preprocess( self , prompt_text , prefix="" , handle_long_generation=None , **generate_kwargs ):
        '''simple docstring'''
        inputs = self.tokenizer(
            prefix + prompt_text , padding=False , add_special_tokens=False , return_tensors=self.framework )
        inputs['prompt_text'] = prompt_text
        if handle_long_generation == "hole":
            cur_len = inputs['input_ids'].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs['max_new_tokens']
            else:
                new_tokens = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
            if new_tokens < 0:
                raise ValueError('We cannot infer how many new tokens are expected' )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
                        ' models max length' )
                inputs['input_ids'] = inputs['input_ids'][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs['attention_mask'] = inputs['attention_mask'][:, -keep_length:]
        return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
        '''simple docstring'''
        input_ids = model_inputs['input_ids']
        attention_mask = model_inputs.get('attention_mask' , None )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop('prompt_text' )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop('prefix_length' , 0 )
        if prefix_length > 0:
            has_max_new_tokens = 'max_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs['max_length'] = generate_kwargs.get('max_length' ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = 'min_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids , attention_mask=attention_mask , **generate_kwargs )
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess( self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
        '''simple docstring'''
        generated_sequence = model_outputs['generated_sequence'][0]
        input_ids = model_outputs['input_ids']
        prompt_text = model_outputs['prompt_text']
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {'generated_token_ids': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , ) )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {'generated_text': all_text}
            records.append(record )
        return records
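# Illustrative sketch (not part of the original file): how the three
# `ReturnType` modes above surface through the public pipeline API. "gpt2" is
# just an example checkpoint and loading it needs the usual transformers setup.
def _demo_return_types():
    from transformers import pipeline
    generator = pipeline('text-generation' , model='gpt2' )
    print(generator('Hello' , return_full_text=True ) )   # ReturnType.FULL_TEXT: prompt + continuation
    print(generator('Hello' , return_full_text=False ) )  # ReturnType.NEW_TEXT: continuation only
    print(generator('Hello' , return_tensors=True ) )     # ReturnType.TENSORS: raw token ids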
| 304 |
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r'''([A-Z]+)([A-Z][a-z])''')
_lowercase_uppercase_re = re.compile(r'''([a-z\d])([A-Z])''')
_single_underscore_re = re.compile(r'''(?<!_)_(?!_)''')
_multiple_underscores_re = re.compile(r'''(_{2,})''')
_split_re = r'''^\w+(\.\w+)*$'''
INVALID_WINDOWS_CHARACTERS_IN_PATH = r'''<>:/\|?*'''
def camelcase_to_snakecase(name ):
    name = _uppercase_uppercase_re.sub(R'\1_\2' , name )
    name = _lowercase_uppercase_re.sub(R'\1_\2' , name )
    return name.lower()
def snakecase_to_camelcase(name ):
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '' )
def filename_prefix_for_name(name ):
    if os.path.basename(name ) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split(name , split ):
    if os.path.basename(name ) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
    if not re.match(_split_re , split ):
        raise ValueError(F'''Split name should match '{_split_re}' but got '{split}'.''' )
    return F'''{filename_prefix_for_name(name )}-{split}'''
def filepattern_for_dataset_split(dataset_name , split , data_dir , filetype_suffix=None ):
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += F'''.{filetype_suffix}'''
    filepath = os.path.join(data_dir , prefix )
    return F'''{filepath}*'''
def filenames_for_dataset_split(path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F'''.{filetype_suffix}'''
        return [filename]
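# Illustrative sketch (not part of the original file): expected behaviour of
# the naming helpers above.
def _demo_naming():
    assert camelcase_to_snakecase('SquadV2' ) == 'squad_v2'
    assert snakecase_to_camelcase('squad_v2' ) == 'SquadV2'
    assert filename_prefix_for_split('squad' , 'train' ) == 'squad-train'
    print(filenames_for_dataset_split('/data' , 'squad' , 'train' , 'arrow' , shard_lengths=[100, 100] ) )
    # -> ['/data/squad-train-00000-of-00002.arrow', '/data/squad-train-00001-of-00002.arrow']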
| 304 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
a_ : Union[str, Any] = logging.get_logger(__name__)
@dataclass
class snake_case ( BenchmarkArguments ):
    """simple docstring"""
    deprecated_args = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
    def __init__( self , **kwargs ):
        """simple docstring"""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    f'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name = kwargs.pop("tpu_name" , self.tpu_name )
        self.device_idx = kwargs.pop("device_idx" , self.device_idx )
        self.eager_mode = kwargs.pop("eager_mode" , self.eager_mode )
        self.use_xla = kwargs.pop("use_xla" , self.use_xla )
        super().__init__(**kwargs )
    tpu_name: str = field(
        default=None , metadata={"help": "Name of TPU"} , )
    device_idx: int = field(
        default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
    eager_mode: bool = field(default=False , metadata={"help": "Benchmark models in eager model."} )
    use_xla: bool = field(
        default=False , metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        } , )
    @cached_property
    def _setup_tpu( self ):
        """simple docstring"""
        requires_backends(self , ["tf"] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy( self ):
        """simple docstring"""
        requires_backends(self , ["tf"] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
                strategy = tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] , "GPU" )  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )
        return strategy
    @property
    def is_tpu( self ):
        """simple docstring"""
        requires_backends(self , ["tf"] )
        return self._setup_tpu is not None
    @property
    def strategy( self ):
        """simple docstring"""
        requires_backends(self , ["tf"] )
        return self._setup_strategy
    @property
    def gpu_list( self ):
        """simple docstring"""
        requires_backends(self , ["tf"] )
        return tf.config.list_physical_devices("GPU" )
    @property
    def n_gpu( self ):
        """simple docstring"""
        requires_backends(self , ["tf"] )
        if self.cuda:
            return len(self.gpu_list )
        return 0
    @property
    def is_gpu( self ):
        """simple docstring"""
        return self.n_gpu > 0
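# Illustrative sketch (not part of the original file): the __init__ above maps
# deprecated `no_*` flags to their positive counterparts before delegating to
# `BenchmarkArguments`. Requires TensorFlow; `models` is assumed to be a field
# of the parent dataclass.
def _demo_benchmark_args():
    args = snake_case(models=['gpt2'] , no_cuda=True )  # stored as cuda=False, with a warning
    print(args.eager_mode , args.use_xla )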
| 707 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : List[str] = logging.get_logger(__name__)
a_ : Dict = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case ( PretrainedConfig ):
    """simple docstring"""
    model_type = "unispeech"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio( self ):
        """simple docstring"""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
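# Illustrative sketch (not part of the original file): with the default strides
# (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples the waveform by
# 5 * 2**6 = 320 samples per output frame.
def _demo_unispeech_config():
    config = snake_case()  # the config class defined above
    assert config.inputs_to_logits_ratio == 320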
| 445 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_xlnet"""] = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_xlnet_fast"""] = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xlnet"""] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xlnet"""] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
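# Illustrative sketch (not part of the original file) of the lazy-import idea
# the module above relies on. The real `_LazyModule` lives in
# `transformers.utils`; this toy version only shows the attribute-access hook.
import importlib
import types
class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._locations = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr):
        if attr not in self._locations:
            raise AttributeError(attr)
        submodule = importlib.import_module('.' + self._locations[attr], self.__name__)
        return getattr(submodule, attr)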
| 87 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=3 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        model = LayoutLMvaModel(config=config )
        model.to(torch_device )
        model.eval()
        # text + image
        result = model(input_ids , pixel_values=pixel_values )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model(pixel_values=pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        model = LayoutLMvaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> bool:
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp( self ):
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(v , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict['labels'] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=torch_device , )
        return inputs_dict
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
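# Illustrative sketch (not part of the original file): LayoutLM-family models
# expect bounding boxes normalized to a 0-1000 coordinate space, which is why
# the tester above draws bbox values with `range_bbox=1000`. A common helper:
def _normalize_bbox(bbox, width, height):
    return [
        int(1000 * bbox[0] / width),
        int(1000 * bbox[1] / height),
        int(1000 * bbox[2] / width),
        int(1000 * bbox[3] / height),
    ]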
@require_torch
class LayoutLMvaModelIntegrationTest ( unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def default_image_processor( self ):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
    @slow
    def test_inference_no_head( self ):
        model = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='pt' ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
| 544 | 0 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term( a_i , k , i , n ):
    """simple docstring"""
    # a_i is the digit list of the current term (least significant digit first),
    # split as a(i) = b * 10^k + c; ds_b is digitsum(b), c the low part.
    ds_b = sum(a_i[j] for j in range(k , len(a_i ) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ) , k ) ) )
    diff , dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b )
    if sub_memo is not None:
        jumps = sub_memo.get(c )
        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff , dn , _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i ) ) ):
                    new_c , a_i[j] = divmod(new_c , 10 )
                if new_c > 0:
                    add(a_i , k , new_c )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff , terms_jumped = next_term(a_i , k - 1 , i + dn , n )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff , terms_jumped = compute(a_i , k , i + dn , n )
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k) )
    return (diff, dn)
def compute( a_i , k , i , n ):
    """simple docstring"""
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b , ds_c , diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend , a_i[j] = divmod(s , 10 )
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i , k , addend )
    return diff, i - start_i
def add( digits , k , addend ):
    """simple docstring"""
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 10:
            quotient , digits[j] = divmod(s , 10 )
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend , digit = divmod(addend , 10 )
        digits.append(digit )
def solution( n : int = 10**15 ):
    """simple docstring"""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff , terms_jumped = next_term(digits , 20 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 10**j
    return a_n
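# Illustrative sketch (not part of the original file): the sequence computed
# above is a(n+1) = a(n) + digit_sum(a(n)) with a(1) = 1, i.e. 1, 2, 4, 8, 16,
# 23, 28, 38, 49, 62, ... A brute-force reference for small n:
def _solution_bruteforce(n ):
    a = 1
    for _ in range(n - 1 ):
        a += sum(int(d ) for d in str(a ) )
    return a
# e.g. _solution_bruteforce(10) == 62, which should agree with solution(10).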
if __name__ == "__main__":
    print(f'{solution() = }')
| 716 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self ):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True )
        input_ids = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        expected_shape = [1, 11, 5_0265]
        self.assertEqual(list(output.shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_no_head( self ):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True )
        input_ids = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 403 | 0 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowercase_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type (model_name_or_path):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths (metric_fn , prediction , ground_truths):
    """simple docstring"""
    return max(metric_fn(prediction , gt) for gt in ground_truths)
def get_scores (args , preds_path , gold_data_path):
    """simple docstring"""
    hypos = [line.strip() for line in open(preds_path , '''r''').readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep='''\t''' , header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path , '''r''').readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths)
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths)
    em = 1_00.0 * em / total
    fa = 1_00.0 * fa / total
    logger.info(F'''F1: {fa:.2f}''')
    logger.info(F'''EM: {em:.2f}''')
def get_precision_at_k (args , preds_path , gold_data_path):
    """simple docstring"""
    k = args.k
    hypos = [line.strip() for line in open(preds_path , '''r''').readlines()]
    references = [line.strip() for line in open(gold_data_path , '''r''').readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references):
        hypo_provenance = set(hypo.split('''\t''')[:k])
        ref_provenance = set(reference.split('''\t'''))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    em = 1_00.0 * em / total
    logger.info(F'''Precision@{k}: {em: .2f}''')
def evaluate_batch_retrieval (args , rag_model , questions):
    """simple docstring"""
    def strip_title(title):
        if title.startswith('''"'''):
            title = title[1:]
        if title.endswith('''"'''):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors='''pt''' , padding=True , truncation=True , )['''input_ids'''].to(args.device)
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs['''title''']]
        provenance_strings.append('''\t'''.join(provenance))
    return provenance_strings
def evaluate_batch_e2e (args , rag_model , questions):
    """simple docstring"""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors='''pt''' , padding=True , truncation=True)
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True)
        if args.print_predictions:
            for q, a in zip(questions , answers):
                logger.info('''Q: {} - A: {}'''.format(q , a))
        return answers
def get_args ():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=str , help=(
            '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
            ''' model_name_or_path'''
        ) , )
    parser.add_argument(
        '''--index_name''' , default=None , choices=['''exact''', '''compressed''', '''legacy'''] , type=str , help='''RAG model retriever type''' , )
    parser.add_argument(
        '''--index_path''' , default=None , type=str , help='''Path to the retrieval index''' , )
    parser.add_argument('''--n_docs''' , default=5 , type=int , help='''Number of retrieved docs''')
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=str , help=(
            '''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
            ''' precision@k.'''
        ) , )
    parser.add_argument('''--k''' , default=1 , type=int , help='''k for the precision@k calculation''')
    parser.add_argument(
        '''--evaluation_set''' , default=None , type=str , required=True , help='''Path to a file containing evaluation samples''' , )
    parser.add_argument(
        '''--gold_data_path''' , default=None , type=str , required=True , help='''Path to a tab-separated file with gold samples''' , )
    parser.add_argument(
        '''--gold_data_mode''' , default='''qa''' , type=str , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file'''
            '''qa - a single line in the following format: question [tab] answer_list'''
            '''ans - a single line of the gold file contains the expected answer string'''
        ) , )
    parser.add_argument(
        '''--predictions_path''' , type=str , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
    parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
    parser.add_argument(
        '''--eval_batch_size''' , default=8 , type=int , help='''Batch size per GPU/CPU for evaluation.''' , )
    parser.add_argument(
        '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
    parser.add_argument(
        '''--num_beams''' , default=4 , type=int , help='''Number of beams to be used when generating answers''' , )
    parser.add_argument('''--min_length''' , default=1 , type=int , help='''Min length of the generated answers''')
    parser.add_argument('''--max_length''' , default=50 , type=int , help='''Max length of the generated answers''')
    parser.add_argument(
        '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
    parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
    args = parser.parse_args()
    args.device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
    return args
def main (args):
    """simple docstring"""
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith('''rag'''):
        model_class = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
        model_kwargs['''n_docs'''] = args.n_docs
        if args.index_name is not None:
            model_kwargs['''index_name'''] = args.index_name
        if args.index_path is not None:
            model_kwargs['''index_path'''] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('''Evaluate the following checkpoints: %s''' , checkpoints)
    score_fn = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path))
            score_fn(args , args.predictions_path , args.gold_data_path)
            continue
        logger.info('''***** Running evaluation for {} *****'''.format(checkpoint))
        logger.info('''  Batch size = %d''' , args.eval_batch_size)
        logger.info('''  Predictions will be stored under {}'''.format(args.predictions_path))
        if args.model_type.startswith('''rag'''):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs)
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs)
        model.to(args.device)
        with open(args.evaluation_set , '''r''') as eval_file, open(args.predictions_path , '''w''') as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions)
                    preds_file.write('''\n'''.join(answers) + '''\n''')
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args , model , questions)
                preds_file.write('''\n'''.join(answers))
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path)
if __name__ == "__main__":
lowercase_ = get_args()
main(args)
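
The evaluation loop above streams questions from a file and flushes predictions one batch at a time, handling the final partial batch separately. A standalone sketch of that chunked-batching idiom follows; `fake_answer` is a hypothetical stand-in for the real batch evaluator, not part of the script above.

def fake_answer(batch):
    # Hypothetical evaluator: replace with a real generate-and-decode call.
    return ["answer to: " + q for q in batch]

def predict_in_batches(questions, batch_size, out_path):
    buffer = []
    with open(out_path, "w") as preds_file:
        for question in questions:
            buffer.append(question.strip())
            if len(buffer) == batch_size:
                preds_file.write("\n".join(fake_answer(buffer)) + "\n")
                preds_file.flush()  # make partial results visible immediately
                buffer = []
        if buffer:  # flush the final, possibly smaller, batch
            preds_file.write("\n".join(fake_answer(buffer)))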
| 11 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __A :
'''simple docstring'''
def __init__(self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ) -> Optional[int]:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def a__ (self , A , A , A , A , A , A , A ) -> Any:
"""simple docstring"""
_a = OpenLlamaModel(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A )
_a = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Any:
"""simple docstring"""
_a = True
_a = OpenLlamaModel(A )
model.to(A )
model.eval()
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
_a = model(
A , attention_mask=A , encoder_hidden_states=A , )
_a = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Tuple:
"""simple docstring"""
_a = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Dict:
"""simple docstring"""
_a = True
_a = True
_a = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
_a = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = torch.cat([input_mask, next_mask] , dim=-1 )
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )['''hidden_states'''][0]
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )['''hidden_states'''][0]
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -3:, random_slice_idx].detach()
_a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
        (
            _a ,
            _a ,
            _a ,
            _a ,
            _a ,
            _a ,
            _a ,
        ) = config_and_inputs
_a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __A ( A , A , A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__lowerCamelCase : Any = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : List[str] = False
__lowerCamelCase : List[str] = False
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = OpenLlamaModelTester(self )
_a = ConfigTester(self , config_class=A , hidden_size=37 )
def a__ (self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a = type
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> Any:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''single_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''multi_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def a__ (self , A ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = ids_tensor([1, 10] , config.vocab_size )
_a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
_a = original_model(A ).last_hidden_state
_a = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = {'''type''': scaling_type, '''factor''': 10.0}
_a = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
_a = scaled_model(A ).last_hidden_state
_a = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
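
The scaling test above exercises RoPE position interpolation. A hedged configuration sketch, assuming a LLaMA-style config that accepts a `rope_scaling` dict as recent transformers releases do:

from transformers import LlamaConfig

# Linear RoPE scaling: positions are divided by `factor`, stretching the
# usable context window at the cost of some short-range fidelity.
config = LlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})

# Dynamic NTK scaling only kicks in once an input exceeds the original
# max_position_embeddings, which is exactly what the test above asserts.
config = LlamaConfig(rope_scaling={"type": "dynamic", "factor": 10.0})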
| 11 | 1 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : int ):
"""simple docstring"""
__snake_case = n
__snake_case = [None] * self.n
        __snake_case = 0  # index of the first (oldest) element
        __snake_case = 0  # index where the next element will be written
        __snake_case = 0  # number of elements currently stored
def __len__( self : Optional[Any] ):
"""simple docstring"""
return self.size
def A ( self : Optional[int] ):
"""simple docstring"""
return self.size == 0
def A ( self : int ):
"""simple docstring"""
        return False if self.is_empty() else self.array[self.front]  # peek; returns False when empty
def A ( self : Any , a_ : List[str] ):
"""simple docstring"""
if self.size >= self.n:
raise Exception("QUEUE IS FULL" )
__snake_case = data
__snake_case = (self.rear + 1) % self.n
self.size += 1
return self
def A ( self : Dict ):
"""simple docstring"""
if self.size == 0:
raise Exception("UNDERFLOW" )
__snake_case = self.array[self.front]
__snake_case = None
__snake_case = (self.front + 1) % self.n
self.size -= 1
return temp
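
A usage sketch of the ring buffer above. Because the transformed method names collide (each is `A`), the sketch restates the same structure with conventional names before exercising FIFO order and index wraparound:

class CircularQueue:
    """Fixed-capacity FIFO backed by a ring buffer (mirrors the class above)."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * n
        self.front = 0  # index of the oldest element
        self.rear = 0   # index where the next element is written
        self.size = 0   # number of stored elements

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp

queue = CircularQueue(3)
queue.enqueue("a")
queue.enqueue("b")
assert queue.dequeue() == "a"  # FIFO order is preserved
queue.enqueue("c")
queue.enqueue("d")  # wraps around into the slot freed by the dequeue
assert [queue.dequeue() for _ in range(3)] == ["b", "c", "d"]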
| 680 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray:
    # Apply the Gaussian function to each element of the matrix.
__snake_case = math.sqrt(_UpperCAmelCase )
__snake_case = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray:
__snake_case = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray:
    # Create a Gaussian kernel of the given dimension.
__snake_case = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase ):
__snake_case = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray:
__snake_case = np.zeros(img.shape )
__snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase )
__snake_case , __snake_case = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2]
__snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase )
__snake_case = val
return imga
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple:
__snake_case = args[1] if args[1:] else "../image_data/lena.jpg"
__snake_case = float(args[2] ) if args[2:] else 1.0
__snake_case = float(args[3] ) if args[3:] else 1.0
if args[4:]:
        __snake_case = int(args[4] )
        __snake_case = kernel_size + abs(kernel_size % 2 - 1 )  # force the kernel size to be odd
else:
__snake_case = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
a , a , a , a : Tuple = parse_args(sys.argv)
a : Tuple = cva.imread(filename, 0)
cva.imshow('''input image''', img)
a : Dict = img / 255
a : str = out.astype('''float32''')
a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
a : Dict = out * 255
a : List[str] = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
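
A standalone sanity check of the Gaussian-kernel builder above, restated with conventional names since the script itself imports the renamed `cva` module: the kernel should be radially symmetric and peak at its centre.

import math
import numpy as np

def gauss_kernel(kernel_size: int, variance: float) -> np.ndarray:
    # Same computation as get_gauss_kernel above: distance-from-centre
    # matrix pushed through a 1-D Gaussian.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    dist = np.zeros((kernel_size, kernel_size))
    for i in range(kernel_size):
        for j in range(kernel_size):
            dist[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return cons * np.exp(-((dist / sigma) ** 2) * 0.5)

kernel = gauss_kernel(5, 1.0)
assert np.allclose(kernel, kernel.T)   # radially symmetric
assert kernel[2, 2] == kernel.max()    # peak at the centre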
| 680 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
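
The `_LazyModule` registration above defers the heavy `modeling_mmbt` import until an attribute is first accessed. A minimal sketch of the same idea using PEP 562 module-level `__getattr__`; this is an analogous pattern, not the actual `_LazyModule` implementation, and the package layout is hypothetical.

# my_package/__init__.py -- hypothetical package demonstrating lazy imports
import importlib

_LAZY_ATTRS = {
    "MMBTConfig": ".configuration_mmbt",
    "MMBTModel": ".modeling_mmbt",
}

def __getattr__(name):
    # Import the owning submodule only on first attribute access.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")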
| 569 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_lowerCAmelCase = """sshleifer/bart-tiny-random"""
_lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
return AutoConfig.from_pretrained(a__ )
def _lowerCamelCase ( self ):
A_ , *A_ : Optional[Any] = create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowerCamelCase ( self ):
A_ , *A_ : Tuple = create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=1 , d=a__ )
def _lowerCamelCase ( self ):
A_ , *A_ : int = create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=1 , d=a__ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowerCamelCase ( self ):
A_ , *A_ : Tuple = create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowerCamelCase ( self ):
with self.assertRaises(a__ ):
create_student_by_copying_alternating_layers(a__ , tempfile.mkdtemp() , e=a__ , d=a__ )
| 569 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : str = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 674 | """simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = np.asarray(weights[0] )
lowerCAmelCase__ = np.asarray(weights[1] )
lowerCAmelCase__ = np.asarray(weights[2] )
lowerCAmelCase__ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = weights[0][0][0]
lowerCAmelCase__ = np.asarray(layer_norm_a[0] )
lowerCAmelCase__ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# lsh weights + output
lowerCAmelCase__ = weights[0][1]
if len(lowerCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
else:
set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
# intermediate weighs
lowerCAmelCase__ = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCamelCase__ ) == 4:
lowerCAmelCase__ = intermediate_weights[2]
# layernorm 2
lowerCAmelCase__ = np.asarray(intermediate_weights[0][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# intermediate dense
lowerCAmelCase__ = np.asarray(intermediate_weights[1][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
# intermediate out
lowerCAmelCase__ = np.asarray(intermediate_weights[4][0] )
lowerCAmelCase__ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = torch_model.reformer
# word embeds
lowerCAmelCase__ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase__ ) , )
if isinstance(weights[3] , lowerCamelCase__ ):
lowerCAmelCase__ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCAmelCase__ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
lowerCAmelCase__ = nn.Parameter(torch.tensor(lowerCamelCase__ ) )
lowerCAmelCase__ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCAmelCase__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# output layer norm
lowerCAmelCase__ = np.asarray(weights[7][0] )
lowerCAmelCase__ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# output embeddings
lowerCAmelCase__ = np.asarray(weights[9][0] )
lowerCAmelCase__ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = ReformerConfig.from_json_file(lowerCamelCase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase__ = ReformerModelWithLMHead(lowerCamelCase__ )
with open(lowerCamelCase__ , """rb""" ) as f:
lowerCAmelCase__ = pickle.load(lowerCamelCase__ )["""weights"""]
set_model_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A_ = tuple[int, int]
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ):
UpperCamelCase_ =vertices
UpperCamelCase_ ={
(min(UpperCamelCase_ ), max(UpperCamelCase_ )): weight for edge, weight in edges.items()
}
def UpperCamelCase__ ( self: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: int ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
UpperCamelCase_ =weight
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =Graph({min(self.vertices )} , {} )
UpperCamelCase_ =42
UpperCamelCase_ =42
UpperCamelCase_ =42
UpperCamelCase_ =42
while len(subgraph.vertices ) < len(self.vertices ):
UpperCamelCase_ =max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
UpperCamelCase_ =edge
UpperCamelCase_ =weight
subgraph.add_edge(UpperCamelCase_ , UpperCamelCase_ )
return subgraph
def _UpperCamelCase ( A = "p107_network.txt" ):
UpperCamelCase_ =os.path.abspath(os.path.dirname(__snake_case ) )
UpperCamelCase_ =os.path.join(__snake_case , __snake_case )
UpperCamelCase_ ={}
UpperCamelCase_ =42
UpperCamelCase_ =42
UpperCamelCase_ =42
with open(__snake_case ) as f:
UpperCamelCase_ =f.read().strip().split("\n" )
UpperCamelCase_ =[line.split("," ) for line in data]
for edgea in range(1 , len(__snake_case ) ):
for edgea in range(__snake_case ):
if adjaceny_matrix[edgea][edgea] != "-":
UpperCamelCase_ =int(adjaceny_matrix[edgea][edgea] )
UpperCamelCase_ =Graph(set(range(len(__snake_case ) ) ) , __snake_case )
UpperCamelCase_ =graph.prims_algorithm()
UpperCamelCase_ =sum(graph.edges.values() )
UpperCamelCase_ =sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
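
A worked example of the "maximum saving" computation above on a tiny network. The transformed class refers to an undefined `Graph` name, so this standalone sketch uses conventional names: a triangle A-B (1), B-C (2), A-C (3) has total weight 6, Prim's MST keeps edges 1 and 2, and the saving is 3.

edges = {("A", "B"): 1, ("B", "C"): 2, ("A", "C"): 3}

def prim_total(vertices, edges):
    # Grow the tree from one vertex, always taking the cheapest edge that
    # crosses the cut -- the same loop as prims_algorithm above.
    tree = {min(vertices)}
    total = 0
    while len(tree) < len(vertices):
        weight, edge = min(
            (w, e) for e, w in edges.items()
            if (e[0] in tree) ^ (e[1] in tree)
        )
        tree.update(edge)
        total += weight
    return total

saving = sum(edges.values()) - prim_total({"A", "B", "C"}, edges)
assert saving == 3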
| 391 |
"""simple docstring"""
import random
def _snake_case ( __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : int ):
"""simple docstring"""
_lowerCamelCase : List[str] = a[left_index]
_lowerCamelCase : Dict = left_index + 1
for j in range(left_index + 1 , __snake_case ):
if a[j] < pivot:
_lowerCamelCase , _lowerCamelCase : List[str] = a[i], a[j]
i += 1
_lowerCamelCase , _lowerCamelCase : Optional[int] = a[i - 1], a[left_index]
return i - 1
def _snake_case ( __snake_case : Tuple , __snake_case : List[str] , __snake_case : List[str] ):
"""simple docstring"""
if left < right:
_lowerCamelCase : Any = random.randint(__snake_case , right - 1 )
_lowerCamelCase , _lowerCamelCase : Optional[Any] = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
_lowerCamelCase : List[str] = partition(__snake_case , __snake_case , __snake_case )
quick_sort_random(
__snake_case , __snake_case , __snake_case ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__snake_case , pivot_index + 1 , __snake_case ) # recursive quicksort to the right of the pivot point
def _snake_case ( ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = input("""Enter numbers separated by a comma:\n""" ).strip()
_lowerCamelCase : int = [int(__snake_case ) for item in user_input.split(""",""" )]
quick_sort_random(__snake_case , 0 , len(__snake_case ) )
print(__snake_case )
if __name__ == "__main__":
main()
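
The transformed definitions above all share one name, so here is a standalone sketch of the same randomized quicksort with its original structure, followed by a direct, non-interactive call:

import random

def partition(a: list, left_index: int, right_index: int) -> int:
    # Lomuto-style partition around a[left_index] as the pivot;
    # returns the pivot's final index.
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1

def quick_sort_random(a: list, left: int, right: int) -> None:
    # Sorts a[left:right] in place; `right` is exclusive.
    if left < right:
        pivot = random.randint(left, right - 1)  # random pivot in [left, right)
        a[left], a[pivot] = a[pivot], a[left]
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)      # left of the pivot
        quick_sort_random(a, pivot_index + 1, right)  # right of the pivot

data = [5, 1, 4, 2, 8, 0]
quick_sort_random(data, 0, len(data))
assert data == [0, 1, 2, 4, 5, 8]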
| 88 | 0 |
"""simple docstring"""
from collections.abc import Generator
def __UpperCAmelCase ( ) -> Generator[int, None, None]:
lowercase__ , lowercase__ : int = 0, 1
while True:
lowercase__ , lowercase__ : int = b, a + b
yield b
def __UpperCAmelCase ( __lowerCamelCase = 10_00 ) -> int:
lowercase__ : Union[str, Any] = 1
lowercase__ : Any = fibonacci_generator()
while len(str(next(__lowerCamelCase ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
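 
The two transformed functions above share one name and the body still calls `fibonacci_generator`, so this standalone restatement uses the original names and checks two small cases (the first two-digit term, 13, is the 7th; the first three-digit term, 144, is the 12th):

from collections.abc import Generator

def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b

def solution(n: int = 1000) -> int:
    # Index of the first Fibonacci term whose decimal expansion has n digits.
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1

assert solution(2) == 7    # 13 is the 7th term: 1, 1, 2, 3, 5, 8, 13
assert solution(3) == 12   # 144 is the 12th term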
| 122 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = ["flax", "transformers"]
def __init__( self : Union[str, Any] ,*_snake_case : str ,**_snake_case : List[str] ) -> Any:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : Any ,*_snake_case : List[Any] ,**_snake_case : Dict ) -> Any:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : List[str] ,*_snake_case : List[Any] ,**_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : str = ["flax", "transformers"]
def __init__( self : str ,*_snake_case : Union[str, Any] ,**_snake_case : Dict ) -> List[Any]:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : List[str] ,*_snake_case : Any ,**_snake_case : str ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] ,*_snake_case : List[Any] ,**_snake_case : int ) -> List[Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : Dict = ["flax", "transformers"]
def __init__( self : Any ,*_snake_case : str ,**_snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : List[Any] ,*_snake_case : int ,**_snake_case : Tuple ) -> List[str]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] ,*_snake_case : Optional[Any] ,**_snake_case : Tuple ) -> int:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = ["flax", "transformers"]
def __init__( self : Optional[Any] ,*_snake_case : List[str] ,**_snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : Optional[Any] ,*_snake_case : int ,**_snake_case : Optional[int] ) -> List[str]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : Tuple ,*_snake_case : Any ,**_snake_case : Dict ) -> List[str]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
| 122 | 1 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __a ( _lowerCAmelCase ):
def __init__( self : List[Any] , UpperCAmelCase_ : VQModel , UpperCAmelCase_ : UNetaDModel , UpperCAmelCase_ : DDIMScheduler )-> str:
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ )
@torch.no_grad()
def __call__( self : Optional[int] , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , **UpperCAmelCase_ : List[str] , )-> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
UpperCamelCase = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=UpperCAmelCase_ , )
UpperCamelCase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(UpperCAmelCase_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase = {}
if accepts_eta:
UpperCamelCase = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase = self.scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
# predict the noise residual
UpperCamelCase = self.unet(UpperCAmelCase_ , UpperCAmelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
# decode the image latents with the VAE
UpperCamelCase = self.vqvae.decode(UpperCAmelCase_ ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(UpperCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase_ )
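
A hedged usage sketch for the unconditional latent-diffusion pipeline defined above, assuming it is exported as `LDMPipeline` and that a compatible VQ-VAE + UNet + DDIM checkpoint is available (`CompVis/ldm-celebahq-256` is one publicly hosted example; substitute your own as needed):

import torch
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
# Same call signature as __call__ above: batch_size, num_inference_steps, eta.
images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images
images[0].save("ldm_sample.png")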
| 554 |
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> int:
"""simple docstring"""
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
UpperCamelCase = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
UpperCamelCase = _calculate(days - 1 , UpperCAmelCase_ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
UpperCamelCase = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
UpperCamelCase = _calculate(days - 1 , UpperCAmelCase_ , 0 )
UpperCamelCase = state_late + state_absent + state_ontime
UpperCamelCase = prizestrings
return prizestrings
def lowerCamelCase__ ( UpperCAmelCase_ = 30 )-> int:
"""simple docstring"""
return _calculate(UpperCAmelCase_ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
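
As a check against the Project Euler 191 statement (43 prize strings among the 81 possible 4-day trinary strings), here is the same recursion without memoisation; the transformed snippet above lost its `cache` binding, so this standalone sketch is suitable for small inputs only:

def count_prize_strings(days: int, absent: int = 0, late: int = 0) -> int:
    # Unmemoised version of _calculate above.
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        count_prize_strings(days - 1, absent, late + 1)  # late today
        + count_prize_strings(days - 1, absent + 1, 0)   # absent today
        + count_prize_strings(days - 1, absent, 0)       # on time today
    )

assert count_prize_strings(4) == 43  # Project Euler 191 example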
| 554 | 1 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1_6_0_0_0 ) -> int:
"""simple docstring"""
_UpperCamelCase = int(round(sample_rate * max_length ) )
if len(lowerCAmelCase ) <= sample_length:
return wav
_UpperCamelCase = randint(0 , len(lowerCAmelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class lowerCAmelCase__ :
UpperCamelCase_ : Optional[str] = field(default=__lowercase , metadata={"help": "Name of a dataset from the datasets package"} )
UpperCamelCase_ : Optional[str] = field(
default=__lowercase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCamelCase_ : Optional[str] = field(
default=__lowercase , metadata={"help": "A file containing the training audio paths and labels."} )
UpperCamelCase_ : Optional[str] = field(
default=__lowercase , metadata={"help": "A file containing the validation audio paths and labels."} )
UpperCamelCase_ : str = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
UpperCamelCase_ : str = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
UpperCamelCase_ : str = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
UpperCamelCase_ : str = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
UpperCamelCase_ : Optional[int] = field(
default=__lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=__lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
UpperCamelCase_ : float = field(
default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class lowerCAmelCase__ :
UpperCamelCase_ : str = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
UpperCamelCase_ : Optional[str] = field(
default=__lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCamelCase_ : Optional[str] = field(
default=__lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
UpperCamelCase_ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
UpperCamelCase_ : Optional[str] = field(
default=__lowercase , metadata={"help": "Name or path of preprocessor config."} )
UpperCamelCase_ : bool = field(
default=__lowercase , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
UpperCamelCase_ : bool = field(
default=__lowercase , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
UpperCamelCase_ : bool = field(
default=__lowercase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
UpperCamelCase_ : Optional[bool] = field(
default=__lowercase , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
UpperCamelCase_ : bool = field(
default=__lowercase , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""will be removed in a future version. Use `--freeze_feature_encoder`"""
"""instead. Setting `freeze_feature_encoder==True`.""" , a , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def __A() -> int:
"""simple docstring"""
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , lowerCAmelCase , lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase )
transformers.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
_UpperCamelCase = DatasetDict()
_UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"""Make sure to set `--label_column_name` to the correct text column - one of """
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_UpperCamelCase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_UpperCamelCase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_UpperCamelCase = feature_extractor.model_input_names[0]
def train_transforms(lowerCAmelCase ):
_UpperCamelCase = []
for audio in batch[data_args.audio_column_name]:
_UpperCamelCase = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowerCAmelCase )
_UpperCamelCase = feature_extractor(lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
_UpperCamelCase = {model_input_name: inputs.get(lowerCAmelCase )}
_UpperCamelCase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowerCAmelCase ):
_UpperCamelCase = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
_UpperCamelCase = feature_extractor(lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
_UpperCamelCase = {model_input_name: inputs.get(lowerCAmelCase )}
_UpperCamelCase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_UpperCamelCase = raw_datasets["""train"""].features[data_args.label_column_name].names
_UpperCamelCase , _UpperCamelCase = {}, {}
for i, label in enumerate(lowerCAmelCase ):
_UpperCamelCase = str(lowerCAmelCase )
_UpperCamelCase = label
# Load the accuracy metric from the datasets package
_UpperCamelCase = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase ):
_UpperCamelCase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowerCAmelCase , references=eval_pred.label_ids )
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowerCAmelCase ) , labelaid=lowerCAmelCase , idalabel=lowerCAmelCase , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCamelCase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_UpperCamelCase = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowerCAmelCase , output_all_columns=lowerCAmelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_UpperCamelCase = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowerCAmelCase , output_all_columns=lowerCAmelCase )
# Initialize our trainer
_UpperCamelCase = Trainer(
model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowerCAmelCase , tokenizer=lowerCAmelCase , )
# Training
if training_args.do_train:
_UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
_UpperCamelCase = trainer.train(resume_from_checkpoint=lowerCAmelCase )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCamelCase = trainer.evaluate()
trainer.log_metrics("""eval""" , lowerCAmelCase )
trainer.save_metrics("""eval""" , lowerCAmelCase )
# Write model card and (optionally) push to hub
_UpperCamelCase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase )
else:
trainer.create_model_card(**lowerCAmelCase )
if __name__ == "__main__":
main()
| 202 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __A(lowerCAmelCase ) -> List[str]:
"""simple docstring"""
if "model" in orig_key:
_UpperCamelCase = orig_key.replace("""model.""" , """""" )
if "norm1" in orig_key:
_UpperCamelCase = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
if "norm2" in orig_key:
_UpperCamelCase = orig_key.replace("""norm2""" , """output.LayerNorm""" )
if "norm" in orig_key:
_UpperCamelCase = orig_key.replace("""norm""" , """LayerNorm""" )
if "transformer" in orig_key:
_UpperCamelCase = orig_key.split(""".""" )[0].split("""_""" )[-1]
_UpperCamelCase = orig_key.replace(F'transformer_{layer_num}' , F'encoder.layer.{layer_num}' )
if "mha.attn" in orig_key:
_UpperCamelCase = orig_key.replace("""mha.attn""" , """attention.self""" )
if "mha" in orig_key:
_UpperCamelCase = orig_key.replace("""mha""" , """attention""" )
if "W_q" in orig_key:
_UpperCamelCase = orig_key.replace("""W_q""" , """self.query""" )
if "W_k" in orig_key:
_UpperCamelCase = orig_key.replace("""W_k""" , """self.key""" )
if "W_v" in orig_key:
_UpperCamelCase = orig_key.replace("""W_v""" , """self.value""" )
if "ff1" in orig_key:
_UpperCamelCase = orig_key.replace("""ff1""" , """intermediate.dense""" )
if "ff2" in orig_key:
_UpperCamelCase = orig_key.replace("""ff2""" , """output.dense""" )
if "ff" in orig_key:
_UpperCamelCase = orig_key.replace("""ff""" , """output.dense""" )
if "mlm_class" in orig_key:
_UpperCamelCase = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
if "mlm" in orig_key:
_UpperCamelCase = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
if "cls" not in orig_key:
_UpperCamelCase = """yoso.""" + orig_key
return orig_key
def __A(lowerCAmelCase , lowerCAmelCase ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_UpperCamelCase = orig_state_dict.pop(lowerCAmelCase )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
_UpperCamelCase = val
_UpperCamelCase = orig_state_dict["""cls.predictions.decoder.bias"""]
_UpperCamelCase = torch.arange(lowerCAmelCase ).expand((1, -1) ) + 2
return orig_state_dict
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = torch.load(lowerCAmelCase , map_location="""cpu""" )["""model_state_dict"""]
_UpperCamelCase = YosoConfig.from_json_file(lowerCAmelCase )
_UpperCamelCase = YosoForMaskedLM(lowerCAmelCase )
_UpperCamelCase = convert_checkpoint_helper(config.max_position_embeddings , lowerCAmelCase )
print(model.load_state_dict(lowerCAmelCase ) )
model.eval()
model.save_pretrained(lowerCAmelCase )
    print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase__ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 202 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = "▁"
SCREAMING_SNAKE_CASE_ = {"vocab_file": "sentencepiece.bpe.model"}
SCREAMING_SNAKE_CASE_ = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
SCREAMING_SNAKE_CASE_ = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__lowerCAmelCase = 1
__lowerCAmelCase = len(self.sp_model ) + self.fairseq_offset
__lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
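# A minimal usage sketch of the tokenizer above. It needs a real SentencePiece
# model on disk, so the path is a hypothetical placeholder and the snippet is
# left commented out:
# tokenizer = XLMRobertaTokenizer("sentencepiece.bpe.model")
# ids = tokenizer("Hello world!")["input_ids"]
# # ids begin with cls_token_id (0) and end with sep_token_id (2); every
# # SentencePiece piece id in between is shifted up by fairseq_offset (1).
# print(tokenizer.convert_ids_to_tokens(ids))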
| 465 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase : Union[str, Any] = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 564 | 0 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
A_ = TypeVar("T")
def get_parent_position(position: int) -> int:
    # Index of a heap node's parent
    return (position - 1) // 2
def get_child_left_position(position: int) -> int:
    # Index of a heap node's left child
    return (2 * position) + 1
def get_child_right_position(position: int) -> int:
    # Index of a heap node's right child
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0
    def __len__(self) -> int:
        return self.elements
    def __repr__(self) -> str:
        return str(self.heap)
    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def push(self, elem: T, weight: int) -> None:
        # Add an element with a given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)
    def extract_min(self) -> T:
        # Remove and return the element with the lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem
    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given element and restore the heap property
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)
    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement)
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None
    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement)
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None
    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions and keep position_map in sync
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0
    def __repr__(self) -> str:
        return str(self.connections)
    def __len__(self) -> int:
        return self.nodes
    def add_node(self, node: T) -> None:
        # Add a node to the graph if it is not already present
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an undirected, weighted edge between two nodes
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    # Run Prim's algorithm with a min priority queue; returns the distance to
    # each node and the parent map describing the minimum spanning tree.
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
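# A small runnable check of the queue-backed Prim implementation above: a
# triangle graph whose minimum spanning tree keeps the two cheapest edges.
def _demo_prims() -> None:
    g: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "a", 5)
    dist, _parent = prims_algo(g)
    # The b-c edge (weight 10) is excluded; the kept edges weigh 3 and 5.
    assert sorted(dist.values()) == [0, 3, 5]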
| 714 | from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    # Check whether `number` is a perfect square
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    # Return the reduced sum x + y + z of three fractions as (numerator, denominator)
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 479 | 0 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
lowerCamelCase__ = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
lowerCamelCase__ = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self) -> None:
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 10_04)
    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 10_05)
    def test_rust_and_python_bpe_tokenizers(self) -> None:
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
        sequence = "I was born in 92000, and this is falsé."
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self) -> None:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self) -> None:
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="camembert-base", revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf", sequences=sequences, )
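# A standalone slow/fast parity check mirroring the tests above. This is a
# hedged sketch only, since it downloads the real "camembert-base" checkpoint,
# so it is left commented out:
# slow_tok = CamembertTokenizer.from_pretrained("camembert-base")
# fast_tok = CamembertTokenizerFast.from_pretrained("camembert-base")
# text = "I was born in 92000, and this is falsé."
# assert slow_tok.encode(text) == fast_tok.encode(text)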
| 612 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize = True,
        size = None,
        crop_pct = None,
        resample = PILImageResampling.BILINEAR,
        do_rescale = True,
        rescale_factor = 1 / 2_55,
        do_normalize = True,
        image_mean = None,
        image_std = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 3_84}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 2_24 / 2_56
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image,
        size,
        crop_pct,
        resample = PILImageResampling.BICUBIC,
        data_format = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 3_84:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize = None,
        size = None,
        crop_pct = None,
        resample = None,
        do_rescale = None,
        rescale_factor = None,
        do_normalize = None,
        image_mean = None,
        image_std = None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
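# A hedged usage sketch of the processor above (the class name follows the
# reconstruction in this segment); left commented since it is illustrative only:
# processor = ConvNextImageProcessor(size={"shortest_edge": 3_84})
# image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
# batch = processor(images=image, return_tensors="np")
# print(batch["pixel_values"].shape)  # (1, 3, 384, 384)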
| 612 | 1 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]),
("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 2_3),
("""JH 9H TH KH QH""", 2_2),
("""JC KH JS JD JH""", 2_1),
("""KH KC 3S 3H 3D""", 2_0),
("""8C 9C 5C 3C TC""", 1_9),
("""JS QS 9H TS KH""", 1_8),
("""7C 7S KH 2H 7H""", 1_7),
("""3C KH 5D 5S KH""", 1_6),
("""QH 8H KD JH 8S""", 1_5),
("""2D 6D 9D TH 7D""", 1_4),
)
def generate_random_hand():
    # Pick two random hands from SORTED_HANDS and derive the expected result
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem 54 of Project Euler: count the hands player one wins in poker_hands.txt
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 3_76
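# Illustration of compare_with on two fixture hands: a plain straight flush
# loses to a royal flush, matching the first row of TEST_COMPARE.
def _demo_compare() -> None:
    hand = PokerHand("2H 3H 4H 5H 6H")
    other = PokerHand("KS AS TS QS JS")
    assert hand.compare_with(other) == "Loss"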
| 708 |
"""simple docstring"""
def is_isogram(string: str) -> bool:
    # An isogram is a word with no repeating letters, case-insensitively
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
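    # Two quick checks: every letter unique vs. repeated letters.
    assert is_isogram("Uncopyrightable")
    assert not is_isogram("allowance")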
| 292 | 0 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
    def __len__(self) -> int:
        return len(self.__components)
    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"
    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")
    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")
    @overload
    def __mul__(self, other: float) -> Vector:
        ...
    @overload
    def __mul__(self, other: Vector) -> float:
        ...
    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")
    def copy(self) -> Vector:
        return Vector(self.__components)
    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")
    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value
    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))
    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    # Return a vector of size `dimension` with all components set to 0
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # Return a unit basis vector with a 1 at index `pos`
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # Compute the linear combination scalar * x + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
    # Return a vector of size n with random integer components in [a, b]
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")
    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")
    @overload
    def __mul__(self, other: float) -> Matrix:
        ...
    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...
    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height
    def width(self) -> int:
        return self.__width
    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")
    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")
    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")
    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    # Return an n x n matrix filled with zeros
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    # Return a width x height matrix with random integer entries in [a, b]
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
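# A small runnable demonstration of the classes above.
def _demo_lin_alg() -> None:
    v = Vector([1, 2, 3])
    w = Vector([3, 2, 1])
    assert str(v + w) == "(4,4,4)"
    assert v * w == 10  # dot product
    m = Matrix([[1, 0], [0, 2]], 2, 2)
    assert m.determinant() == 2
    assert str(m * Vector([1, 1])) == "(1,2)"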
| 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
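# Hypothetical end-to-end usage of the processor. It pulls real weights from
# the Hub, so it is left commented out:
# from PIL import Image
# proc = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# inputs = proc(text=["一只猫"], images=Image.new("RGB", (224, 224)), return_tensors="pt")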
| 0 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}
    def __init__( self , vocab_size=5_0_2_7_7 , context_length=1_0_2_4 , hidden_size=4_0_9_6 , num_hidden_layers=3_2 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1E-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs ) | 710 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )
    args = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path) | 94 | 0 |
from __future__ import annotations
def is_9_pandigital(number: int) -> bool:
    # True when `number` has nine digits and contains each of 1-9 exactly once
    base_str = str(number)
    return len(base_str) == 9 and set(base_str) == set("123456789")
def solution() -> int | None:
    for base_num in range(99_99, 49_99, -1):
        candidate = 10_00_02 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(3_33, 99, -1):
        candidate = 1_00_20_03 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F'{solution() = }')
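    # 918273645 contains each of the digits 1-9 exactly once, so it passes;
    # a repeated digit or a shorter number does not.
    assert is_9_pandigital(918273645)
    assert not is_9_pandigital(112345678)
    assert not is_9_pandigital(12345678)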
| 570 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ):
snake_case__ : List[str] = ['''onnx''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Any:
requires_backends(self , ['onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
requires_backends(cls , ['onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['onnx'] )
| 570 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': SegformerModel,
'image-classification': SegformerForImageClassification,
'image-segmentation': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)
    def test_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
@unittest.skip('SegFormer does not use inputs_embeds' )
    def test_inputs_embeds(self):
'''simple docstring'''
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
    def test_model_common_attributes(self):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = model_class(lowerCamelCase_ )
lowerCamelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
lowerCamelCase__ : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Union[str, Any] = True
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = True
lowerCamelCase__ : int = False
lowerCamelCase__ : str = True
lowerCamelCase__ : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Dict = outputs.attentions
lowerCamelCase__ : Optional[int] = sum(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : List[Any] = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Optional[int] = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
# verify the first attentions (first block, first layer)
lowerCamelCase__ : Optional[int] = (self.model_tester.image_size // 4) ** 2
lowerCamelCase__ : Dict = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
# verify the last attentions (last block, last layer)
lowerCamelCase__ : Tuple = (self.model_tester.image_size // 3_2) ** 2
lowerCamelCase__ : Optional[int] = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], )
lowerCamelCase__ : Optional[Any] = len(lowerCamelCase_ )
# Check attention is always last and order is fine
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : List[str] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
self.assertEqual(out_len + 1, len(lowerCamelCase_ ) )
lowerCamelCase__ : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
# verify the first attentions (first block, first layer)
lowerCamelCase__ : Dict = (self.model_tester.image_size // 4) ** 2
lowerCamelCase__ : int = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
def a__ (self ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : List[str] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : str = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Dict = outputs.hidden_states
lowerCamelCase__ : int = self.model_tester.num_encoder_blocks
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ), [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
], )
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : str = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase_ ):
continue
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
lowerCamelCase__ : List[Any] = self._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
lowerCamelCase__ : Any = model(**lowerCamelCase_ ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def a__ (self ):
'''simple docstring'''
pass
@slow
def a__ (self ):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : List[str] = SegformerModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
image_processor = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2), keep_ratio=False, align=False, do_random_crop=False )
model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
torch_device )
image = prepare_img()
encoded_inputs = image_processor(images=image, return_tensors='pt' )
pixel_values = encoded_inputs.pixel_values.to(torch_device )
with torch.no_grad():
outputs = model(pixel_values )
expected_shape = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape, expected_shape )
expected_slice = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4 ) )
@slow
def a__ (self ):
'''simple docstring'''
image_processor = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2), keep_ratio=False, align=False, do_random_crop=False )
model = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(torch_device )
image = prepare_img()
encoded_inputs = image_processor(images=image, return_tensors='pt' )
pixel_values = encoded_inputs.pixel_values.to(torch_device )
with torch.no_grad():
outputs = model(pixel_values )
expected_shape = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape, expected_shape )
expected_slice = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1 ) )
@slow
def a__ (self ):
'''simple docstring'''
image_processor = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2), keep_ratio=False, align=False, do_random_crop=False )
model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
torch_device )
image = prepare_img()
encoded_inputs = image_processor(images=image, return_tensors='pt' )
pixel_values = encoded_inputs.pixel_values.to(torch_device )
with torch.no_grad():
outputs = model(pixel_values )
outputs.logits = outputs.logits.detach().cpu()
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(5_0_0, 3_0_0)] )
expected_shape = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape, expected_shape )
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
expected_shape = torch.Size((1_2_8, 1_2_8) )
self.assertEqual(segmentation[0].shape, expected_shape )
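# --- Illustrative aside (not part of the test suite above) ---------------------
# A minimal, hedged sketch of what `post_process_semantic_segmentation` does for
# one image: bilinear upsampling of the raw logits to the requested target size,
# then an argmax over the label axis. The helper name and shapes are assumptions
# chosen to mirror the test, not library API.
import torch

def sketch_post_process(logits: torch.Tensor, target_size=(500, 300)) -> torch.Tensor:
    # logits: (batch, num_labels, height, width) straight from the model
    upsampled = torch.nn.functional.interpolate(
        logits, size=target_size, mode="bilinear", align_corners=False
    )
    # per-pixel class ids for the first image, shape == target_size
    return upsampled.argmax(dim=1)[0]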
| 715 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
cls._token = TOKEN
HfFolder.save_token(TOKEN )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
config = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
new_config = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(v, getattr(new_config, k ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(tmp_dir, repo_id='test-config', push_to_hub=True, use_auth_token=self._token )
new_config = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(v, getattr(new_config, k ) )
def a__ (self ):
'''simple docstring'''
config = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(v, getattr(new_config, k ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
tmp_dir, repo_id='valid_org/test-config-org', push_to_hub=True, use_auth_token=self._token )
new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(v, getattr(new_config, k ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
config = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
new_config = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=True )
# Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
c = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
n_embd = c.n_embd + 1  # int
resid_pdrop = c.resid_pdrop + 1.0  # float
scale_attn_weights = not c.scale_attn_weights  # bool
summary_type = c.summary_type + 'foo'  # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(n_embd, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(resid_pdrop, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(scale_attn_weights, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(summary_type, c.summary_type, 'mismatch for key: summary_type' )
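# Worked example of the update string built above, using the GPT-2 defaults
# (n_embd=768, resid_pdrop=0.1, scale_attn_weights=True, summary_type="cls_index"),
# so the string expands to
#   "n_embd=769,resid_pdrop=1.1,scale_attn_weights=False,summary_type=cls_indexfoo".
# `update_from_string` splits on "," and then "=", casting each value to the type
# of the existing attribute, which is why the bool round-trips correctly.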
def a__ (self ):
'''simple docstring'''
base_config = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key )]
if len(keys_with_defaults ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(keys_with_defaults )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(OSError ):
# config is in subfolder, the following should not work without specifying the subfolder
config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(config )
def a__ (self ):
'''simple docstring'''
response_mock = mock.Mock()
response_mock.status_code = 5_0_0
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=response_mock ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
configuration = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(tmp_dir )
configuration.hidden_size = 2
json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
new_configuration = AutoConfig.from_pretrained(tmp_dir )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
configuration.hidden_size = 7_6_8
configuration.save_pretrained(tmp_dir )
shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json' ), os.path.join(tmp_dir, 'config.42.0.0.json' ) )
new_configuration = AutoConfig.from_pretrained(tmp_dir )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
repo = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
new_transformers.configuration_utils.__version__ = 'v4.0.0'
new_configuration , kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
repo, return_unused_kwargs=True )
self.assertEqual(new_configuration.hidden_size, 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(kwargs, {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
old_transformers.configuration_utils.__version__ = 'v3.0.0'
old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
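# --- Illustrative aside --------------------------------------------------------
# A hedged sketch of the version-gated file selection the two tests above rely
# on: among files named `config.X.Y.Z.json`, pick the newest one whose version
# does not exceed the running library version. This helper is a simplification
# written for this document, not the transformers implementation.
from packaging import version

def sketch_pick_config_file(available_files, current_version):
    best = "config.json"  # fallback when nothing is version-gated
    best_v = version.parse("0")
    for name in available_files:
        v = version.parse(name[len("config."):-len(".json")])
        if best_v < v <= version.parse(current_version):
            best, best_v = name, v
    return best

# sketch_pick_config_file(["config.4.0.0.json", "config.42.0.0.json"], "4.30.0")
# -> "config.4.0.0.json"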
| 696 | 0 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
"""simple docstring"""
def __init__(self , __a , __a=13 , __a=32 , __a=2 , __a=3 , __a=16 , __a=[1, 2, 1] , __a=[2, 2, 4] , __a=2 , __a=2.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=False , __a=True , __a=0.02 , __a=1E-5 , __a=True , __a=None , __a=True , __a=10 , __a=8 , ):
'''simple docstring'''
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = image_size
lowerCamelCase = patch_size
lowerCamelCase = num_channels
lowerCamelCase = embed_dim
lowerCamelCase = depths
lowerCamelCase = num_heads
lowerCamelCase = window_size
lowerCamelCase = mlp_ratio
lowerCamelCase = qkv_bias
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = drop_path_rate
lowerCamelCase = hidden_act
lowerCamelCase = use_absolute_embeddings
lowerCamelCase = patch_norm
lowerCamelCase = layer_norm_eps
lowerCamelCase = initializer_range
lowerCamelCase = is_training
lowerCamelCase = scope
lowerCamelCase = use_labels
lowerCamelCase = type_sequence_label_size
lowerCamelCase = encoder_stride
def _a (self ):
'''simple docstring'''
lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = self.get_config()
return config, pixel_values, labels
def _a (self ):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a (self , __a , __a , __a ):
'''simple docstring'''
lowerCamelCase = SwinvaModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase = model(__a )
lowerCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a (self , __a , __a , __a ):
'''simple docstring'''
lowerCamelCase = SwinvaForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
lowerCamelCase = model(__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase = 1
lowerCamelCase = SwinvaForMaskedImageModeling(__a )
model.to(__a )
model.eval()
lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a (self , __a , __a , __a ):
'''simple docstring'''
lowerCamelCase = self.type_sequence_label_size
lowerCamelCase = SwinvaForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs
lowerCamelCase = {"pixel_values": pixel_values}
return config, inputs_dict
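# Shape arithmetic behind `create_and_check_model` above, spelled out with the
# tester defaults (image_size=32, patch_size=2, depths=[1, 2, 1], embed_dim=16)
# as a purely illustrative check:
#   tokens in          = (32 // 2) ** 2        = 256
#   after 2 mergings   = 256 // 4 ** (3 - 1)   = 16   (expected_seq_len)
#   final hidden size  = 16 * 2 ** (3 - 1)     = 64   (expected_dim)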
@require_torch
class lowerCamelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
all_model_classes = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
pipeline_model_mapping = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
_A = False
_A = False
_A = False
_A = False
def setUp(self ):
'''simple docstring'''
self.model_tester = SwinvaModelTester(self )
self.config_tester = ConfigTester(self , config_class=SwinvaConfig , embed_dim=37 )
def _a (self ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def _a (self ):
'''simple docstring'''
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def _a (self ):
'''simple docstring'''
pass
def _a (self ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def _a (self ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase = model_class(__a )
lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase = [*signature.parameters.keys()]
lowerCamelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def _a (self ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = True
for model_class in self.all_model_classes:
lowerCamelCase = True
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase = outputs.attentions
lowerCamelCase = len(self.model_tester.depths )
self.assertEqual(len(__a ) , __a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase = True
lowerCamelCase = config.window_size**2
lowerCamelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
lowerCamelCase = len(__a )
# Check attention is always last and order is fine
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase = model(**self._prepare_for_class(__a , __a ) )
if hasattr(self.model_tester , "num_hidden_states_types" ):
lowerCamelCase = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCamelCase = 2
self.assertEqual(out_len + added_hidden_states , len(__a ) )
lowerCamelCase = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _a (self , __a , __a , __a , __a ):
'''simple docstring'''
lowerCamelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase = outputs.hidden_states
lowerCamelCase = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# Swinv2 has a different seq_length
lowerCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = reshaped_hidden_states[0].shape
lowerCamelCase = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a (self ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCamelCase = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase = True
self.check_hidden_states_output(__a , __a , __a , __a )
def _a (self ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = 3
lowerCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCamelCase = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def _a (self ):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase = SwinvaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def _a (self ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = _config_zero_init(__a )
for model_class in self.all_model_classes:
lowerCamelCase = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
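# --- Illustrative aside --------------------------------------------------------
# The rounding trick used in the initialization test above, isolated as a tiny
# helper (assumed name; with a zero-initialized config, non-embedding weights
# should come out all-zeros or all-ones, hence a mean of exactly 0.0 or 1.0):
import torch

def sketch_mean_is_zero_or_one(param: torch.Tensor) -> bool:
    # quantize the mean at 1e-9 resolution, exactly as the test does
    return ((param.data.mean() * 1e9).round() / 1e9).item() in [0.0, 1.0]

# sketch_mean_is_zero_or_one(torch.zeros(4, 4))       -> True
# sketch_mean_is_zero_or_one(torch.full((4, 4), 0.5)) -> False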
@require_vision
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def _a (self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
if is_vision_available()
else None
)
@slow
def _a (self ):
'''simple docstring'''
model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
torch_device )
image_processor = self.default_image_processor
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) ) | 623 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('DataClass', Any)
DataClassType = NewType('DataClassType', Any)
def string_to_bool( v ):
"""simple docstring"""
if isinstance(v , bool ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def make_choice_type_function( choices ):
"""simple docstring"""
str_to_choice = {str(choice ): choice for choice in choices}
return lambda arg : str_to_choice.get(arg , arg )
def HfArg( *,
aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ):
"""simple docstring"""
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
metadata = {}
if aliases is not None:
metadata["aliases"] = aliases
if help is not None:
metadata["help"] = help
return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
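# --- Illustrative aside --------------------------------------------------------
# Hedged usage sketch for `HfArg` together with the parser class defined below
# (the dataclass and its values are made up for illustration):
@dataclasses.dataclass
class _SketchArguments:
    learning_rate: float = HfArg(default=3e-4, aliases=["--lr"], help="peak learning rate")
    output_dir: str = "out"

# parser = HfArgumentParser(_SketchArguments)
# (sketch_args,) = parser.parse_args_into_dataclasses(["--lr", "1e-4"])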
class HfArgumentParser( ArgumentParser ):
"""simple docstring"""
dataclass_types : Iterable[DataClassType]
def __init__(self , dataclass_types , **kwargs ):
'''simple docstring'''
if "formatter_class" not in kwargs:
kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
super().__init__(**kwargs )
if dataclasses.is_dataclass(dataclass_types ):
dataclass_types = [dataclass_types]
self.dataclass_types = list(dataclass_types )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(dtype )
@staticmethod
def _parse_dataclass_field(parser , field ):
'''simple docstring'''
field_name = F"""--{field.name}"""
kwargs = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , str ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
aliases = kwargs.pop("aliases" , [] )
if isinstance(aliases , str ):
aliases = [aliases]
origin_type = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(types , "UnionType" ) and isinstance(field.type , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
F""" Problem encountered in field '{field.name}'.""" )
if type(None ) not in field.type.__args__:
# filter `str` in Union
field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
origin_type = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
field.type = (
field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
)
origin_type = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
bool_kwargs = {}
if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
if origin_type is Literal:
kwargs["choices"] = field.type.__args__
else:
kwargs["choices"] = [x.value for x in field.type]
kwargs["type"] = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
kwargs["default"] = field.default
else:
kwargs["required"] = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
bool_kwargs = copy(kwargs )
# Hack because type=bool in argparse does not behave as we want.
kwargs["type"] = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
default = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
kwargs["default"] = default
# This tells argparse we accept 0 or 1 value after --field_name
kwargs["nargs"] = "?"
# This is the value that will get picked if we do --field_name (without value)
kwargs["const"] = True
elif isclass(origin_type ) and issubclass(origin_type , list ):
kwargs["type"] = field.type.__args__[0]
kwargs["nargs"] = "+"
if field.default_factory is not dataclasses.MISSING:
kwargs["default"] = field.default_factory()
elif field.default is dataclasses.MISSING:
kwargs["required"] = True
else:
kwargs["type"] = field.type
if field.default is not dataclasses.MISSING:
kwargs["default"] = field.default
elif field.default_factory is not dataclasses.MISSING:
kwargs["default"] = field.default_factory()
else:
kwargs["required"] = True
parser.add_argument(field_name , *aliases , **kwargs )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
bool_kwargs["default"] = False
parser.add_argument(F"""--no_{field.name}""" , action="store_false" , dest=field.name , **bool_kwargs )
def _add_dataclass_arguments(self , dtype ):
'''simple docstring'''
if hasattr(dtype , "_argument_group_name" ):
parser = self.add_argument_group(dtype._argument_group_name )
else:
parser = self
try:
type_hints = get_type_hints(dtype )
except NameError:
raise RuntimeError(
F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
"removing the line `from __future__ import annotations`, which opts in to Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
python_version = ".".join(map(str , sys.version_info[:3] ) )
raise RuntimeError(
F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
"the line `from __future__ import annotations`, which opts in to union types such as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(dtype ):
if not field.init:
continue
field.type = type_hints[field.name]
self._parse_dataclass_field(parser , field )
def parse_args_into_dataclasses(self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ):
'''simple docstring'''
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
args_files = []
if args_filename:
args_files.append(Path(args_filename ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
args_file_parser = ArgumentParser()
args_file_parser.add_argument(args_file_flag , type=str , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
cfg , args = args_file_parser.parse_known_args(args=args )
cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip("-" ) , None )
if cmd_args_file_paths:
args_files.extend([Path(p ) for p in cmd_args_file_paths] )
file_args = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
args = file_args + args if args is not None else file_args + sys.argv[1:]
namespace , remaining_args = self.parse_known_args(args=args )
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
for k in keys:
delattr(namespace , k )
obj = dtype(**inputs )
outputs.append(obj )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(namespace )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def parse_dict(self , args , allow_extra_keys = False ):
'''simple docstring'''
unused_keys = set(args.keys() )
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
inputs = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
obj = dtype(**inputs )
outputs.append(obj )
if not allow_extra_keys and unused_keys:
raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}""" )
return tuple(outputs )
def parse_json_file(self , json_file , allow_extra_keys = False ):
'''simple docstring'''
with open(Path(json_file ) , encoding="utf-8" ) as open_json_file:
data = json.loads(open_json_file.read() )
outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
return tuple(outputs )
def parse_yaml_file(self , yaml_file , allow_extra_keys = False ):
'''simple docstring'''
outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
return tuple(outputs ) | 623 | 1 |
"""simple docstring"""
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)
BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig( PretrainedConfig ):
model_type = "bertabs"
def __init__( self , vocab_size=30522 , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , **kwargs , ):
super().__init__(**kwargs )
self.vocab_size = vocab_size
self.max_pos = max_pos
self.enc_layers = enc_layers
self.enc_hidden_size = enc_hidden_size
self.enc_heads = enc_heads
self.enc_ff_size = enc_ff_size
self.enc_dropout = enc_dropout
self.dec_layers = dec_layers
self.dec_hidden_size = dec_hidden_size
self.dec_heads = dec_heads
self.dec_ff_size = dec_ff_size
self.dec_dropout = dec_dropout
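# --- Illustrative aside --------------------------------------------------------
# Hedged usage sketch for the config above; like any PretrainedConfig it
# round-trips through JSON (the directory name is made up):
#   cfg = BertAbsConfig(dec_layers=8)
#   cfg.save_pretrained("./bertabs")     # writes ./bertabs/config.json
#   assert BertAbsConfig.from_pretrained("./bertabs").dec_layers == 8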
| 719 | """simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a_ :
def __init__( self , a=2 , b=3 , length=64 , seed=None ):
rng = np.random.default_rng(seed )
self.length = length
self.x = rng.normal(size=(length,) ).astype(np.float32 )
self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
def __len__( self : Optional[Any] ):
return self.length
def __getitem__( self : List[str] , snake_case__ : Optional[int] ):
return {"x": self.x[i], "y": self.y[i]}
class a_ ( torch.nn.Module ):
def __init__( self , a=0 , b=0 , double_output=False ):
super().__init__()
self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
self.first_batch = True
def forward( self , x=None ):
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
self.first_batch = False
return x * self.a[0] + self.b[0]
class a_ ( torch.nn.Module ):
def __init__( self , a=0 , b=0 , double_output=False ):
super().__init__()
self.a = torch.nn.Parameter(torch.tensor(a ).float() )
self.b = torch.nn.Parameter(torch.tensor(b ).float() )
self.first_batch = True
def forward( self , x=None ):
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
self.first_batch = False
return x * self.a + self.b
def get_dataloaders( accelerator , batch_size = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
data_files = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
datasets = load_dataset("""csv""" , data_files=data_files )
label_list = datasets["""train"""].unique("""label""" )
label_to_id = {v: i for i, v in enumerate(label_list )}
def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None , padding="""max_length""" )
if "label" in examples:
outputs["""labels"""] = [label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
tokenized_datasets = datasets.map(
tokenize_function , batched=True , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase__ = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=2 )
lowerCAmelCase__ = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=1 )
return train_dataloader, eval_dataloader
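# --- Illustrative aside --------------------------------------------------------
# Hedged usage sketch for the helper above (requires the MRPC sample CSVs and an
# accelerate runtime; illustrative, not part of the test utilities):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator, 16)
#   train_dl, eval_dl = accelerator.prepare(train_dl, eval_dl)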
| 674 | 0 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config( model_name ):
'''simple docstring'''
config = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
snake_case_ = 128
elif "12-12" in model_name:
snake_case_ = 12
snake_case_ = 12
elif "14-14" in model_name:
snake_case_ = 14
snake_case_ = 14
elif "16-16" in model_name:
snake_case_ = 16
snake_case_ = 16
else:
raise ValueError("Model not supported" )
repo_id = '''huggingface/label-files'''
if "speech-commands" in model_name:
config.num_labels = 35
filename = '''speech-commands-v2-id2label.json'''
else:
config.num_labels = 527
filename = '''audioset-id2label.json'''
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
return config
def rename_key( name ):
'''simple docstring'''
if "module.v" in name:
snake_case_ = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
snake_case_ = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
snake_case_ = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
snake_case_ = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
snake_case_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
snake_case_ = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
snake_case_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
snake_case_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
snake_case_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
snake_case_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
snake_case_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
snake_case_ = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
snake_case_ = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
snake_case_ = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
snake_case_ = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
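# Worked example for the renaming above (the sample key mirrors the original AST
# checkpoint layout; illustrative only):
#   "module.v.blocks.0.attn.proj.weight"
#     -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"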
def convert_state_dict( orig_state_dict , config ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if "qkv" in key:
key_split = key.split("." )
layer_num = int(key_split[3] )
dim = config.hidden_size
if "weight" in key:
orig_state_dict[F'''audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
orig_state_dict[F'''audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
orig_state_dict[F'''audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
else:
orig_state_dict[F'''audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias'''] = val[:dim]
orig_state_dict[F'''audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias'''] = val[dim : dim * 2]
orig_state_dict[F'''audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias'''] = val[-dim:]
else:
orig_state_dict[rename_key(key )] = val
return orig_state_dict
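# Note on the qkv split above (illustrative): the fused checkpoint stores one
# (3 * hidden_size, hidden_size) projection, so with hidden_size 768 the row
# slices are 0..767 -> query, 768..1535 -> key, 1536..2303 -> value.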
def remove_keys( state_dict ):
'''simple docstring'''
ignore_keys = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
'''simple docstring'''
config = get_audio_spectrogram_transformer_config(model_name )
model_name_to_url = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
checkpoint_url = model_name_to_url[model_name]
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )
# remove some keys
remove_keys(state_dict )
# rename some keys
new_state_dict = convert_state_dict(state_dict , config )
# load 🤗 model
model = ASTForAudioClassification(config )
model.eval()
model.load_state_dict(new_state_dict )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
mean = -4.2_6_7_7_3_9_3 if '''speech-commands''' not in model_name else -6.8_4_5_9_7_8
std = 4.5_6_8_9_9_7_4 if '''speech-commands''' not in model_name else 5.5_6_5_4_5_2_6
max_length = 1_024 if '''speech-commands''' not in model_name else 128
feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
if "speech-commands" in model_name:
snake_case_ = load_dataset("speech_commands" , "v0.02" , split="validation" )
snake_case_ = dataset[0]['''audio''']['''array''']
else:
snake_case_ = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
snake_case_ = torchaudio.load(lowerCAmelCase_ )
snake_case_ = waveform.squeeze().numpy()
snake_case_ = feature_extractor(lowerCAmelCase_ , sampling_rate=16_000 , return_tensors="pt" )
# forward pass
outputs = model(**inputs )
logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
snake_case_ = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
snake_case_ = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
snake_case_ = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
snake_case_ = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
snake_case_ = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
snake_case_ = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
snake_case_ = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
elif model_name == "ast-finetuned-speech-commands-v2":
snake_case_ = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ):
raise ValueError("Logits don\'t match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(pytorch_dump_folder_path )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
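# Hedged usage sketch for a converted checkpoint (the repo id matches what the
# script above pushes to; treat it as an assumption about what is published):
#   from transformers import ASTFeatureExtractor, ASTForAudioClassification
#   extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#   model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")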
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 283 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig( PretrainedConfig ):
model_type = 'donut-swin'
attribute_map = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 12, 24] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = image_size
lowerCAmelCase__ : List[str] = patch_size
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : Optional[Any] = embed_dim
lowerCAmelCase__ : int = depths
lowerCAmelCase__ : Dict = len(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = num_heads
lowerCAmelCase__ : Dict = window_size
lowerCAmelCase__ : str = mlp_ratio
lowerCAmelCase__ : Optional[int] = qkv_bias
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = drop_path_rate
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : List[str] = use_absolute_embeddings
lowerCAmelCase__ : Dict = layer_norm_eps
lowerCAmelCase__ : Any = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase__ : List[Any] = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
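
# A short usage sketch (not in the original file); the relative imports above mean this
# module only runs from inside the transformers package.
def _config_demo() -> None:
    config = DonutSwinConfig()
    assert config.hidden_size == 96 * 2 ** 3  # channel dim after the last of 4 stages
    assert config.num_hidden_layers == len(config.depths)  # aliased via attribute_map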
| 678 | 0 |
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    r"""Cross-attention 2D downsampling block: resnet/attention pairs plus an optional downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    r"""Plain 2D downsampling block: resnets only, plus an optional downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
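
# A minimal shape-check sketch (an assumption, not part of the original module): run the
# plain down block once with dummy NHWC inputs; channel and embedding sizes are arbitrary.
def _down_block_shape_check():
    import jax

    block = FlaxDownBlock2D(in_channels=32, out_channels=64)
    rng = jax.random.PRNGKey(0)
    sample = jnp.zeros((1, 8, 8, 32))  # (batch, height, width, channels)
    temb = jnp.zeros((1, 128))  # time embedding
    params = block.init(rng, sample, temb)
    hidden_states, output_states = block.apply(params, sample, temb)
    assert hidden_states.shape == (1, 4, 4, 64)  # spatial dims halved by the downsampler
    return output_states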
class FlaxCrossAttnUpBlock2D(nn.Module):
    r"""Cross-attention 2D upsampling block: resnet/attention pairs over skip connections."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlock2D(nn.Module):
    r"""Plain 2D upsampling block: resnets over skip connections, plus an optional upsampler."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    r"""Mid block: a leading resnet followed by alternating attention/resnet pairs."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
 | 382 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
 | 382 | 1 |
"""
Project Euler Problem 40: Champernowne's constant.
https://projecteuler.net/problem=40
"""


def solution() -> int:
    """Returns the product of the digits d_1 * d_10 * d_100 * d_1000 * d_10000
    * d_100000 * d_1000000 of Champernowne's constant 0.123456789101112..."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
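
# A small cross-check (not part of the original solution): the first digits of
# Champernowne's constant can be built eagerly, and d_1 and d_10 are both 1.
def _sanity_check() -> None:
    digits = "".join(str(n) for n in range(1, 200))
    assert digits[0] == "1" and digits[9] == "1"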
| 442 |
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        """Creates a TFGPT2Tokenizer from an existing GPT2Tokenizer."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        """Creates a TFGPT2Tokenizer from a pretrained GPT2Tokenizer checkpoint."""
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Creates a TFGPT2Tokenizer from this layer's config."""
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 442 | 1 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
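
# Note (an assumption about the checkout layout): the fast tests above can be run with
#   python -m pytest tests/pipelines/shap_e/test_shap_e.py -k FastTests
# while the @slow/@require_torch_gpu class only executes with RUN_SLOW=1 and a CUDA device.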
| 718 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
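
# A quick round-trip sketch (not in the original module): encrypting and then decrypting
# with the same key should return the starting message unchanged.
def _round_trip_demo() -> None:
    key, message = "LEMON", "Attack at dawn!"
    assert decrypt_message(key, encrypt_message(key, message)) == message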
| 189 | 0 |