'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
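
# The module above registers its submodules lazily. Below is a minimal sketch
# of the same pattern using only the standard library; the class and names
# here are illustrative, not the transformers implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    """Defers submodule imports until one of their symbols is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value
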
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    r"""
    Constructs a LayoutLMv3 processor which combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into
    a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
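
# A minimal usage sketch for the processor above, assuming the
# "microsoft/layoutlmv3-base" checkpoint and pytesseract (apply_ocr defaults
# to True) are available; the file name is illustrative.
from PIL import Image

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.open("document.png").convert("RGB")
encoding = processor(image, return_tensors="pt")  # OCR supplies words and boxes
print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values
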
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # [CLS] A [SEP]  (pair: [CLS] A [SEP] B [SEP])
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
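
# A minimal usage sketch for the fast tokenizer above, assuming the
# "bert-base-uncased" files are reachable; it exercises the special-token
# helpers defined in the class.
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
encoding = tokenizer("hello world", "how are you")
print(encoding["input_ids"])       # [CLS] ... [SEP] ... [SEP]
print(encoding["token_type_ids"])  # 0s for the first segment, 1s for the second
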
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    r"""2D downsizing block with cross attention: resnet/attention pairs plus an optional downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    r"""Plain 2D downsizing block: resnets plus an optional downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    r"""2D upsampling block with cross attention: resnet/attention pairs plus an optional upsampler."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    r"""Plain 2D upsampling block: resnets plus an optional upsampler."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    r"""UNet mid block with cross attention: alternating resnets and attention layers."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
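
# A minimal init/apply sketch for FlaxDownBlock2D above, assuming jax and
# flax are installed and the relative resnet imports resolve; the shapes are
# illustrative assumptions, with channels-last (NHWC) layout.
import jax

block = FlaxDownBlock2D(in_channels=32, out_channels=32, num_layers=1)
sample = jnp.ones((1, 64, 64, 32))  # (batch, height, width, channels)
temb = jnp.ones((1, 128))           # time embedding
params = block.init(jax.random.PRNGKey(0), sample, temb)
hidden_states, skip_states = block.apply(params, sample, temb)
print(hidden_states.shape)          # spatially downsampled when add_downsample=True
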
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate a MaskFormerConfig from a backbone config and a decoder config."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
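
# A minimal usage sketch for the config above; with no arguments it falls
# back to a Swin backbone and a DETR decoder, as documented in __init__.
config = MaskFormerConfig()
print(config.backbone_config.model_type)  # "swin"
print(config.decoder_config.model_type)   # "detr"
config_dict = config.to_dict()            # nested configs are serialized too
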
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Calculate the Hubble parameter H(z) for the given densities and redshift."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
'''simple docstring'''
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
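
# A short non-interactive check of the sort above; seeding the module-level
# RNG makes the pivot choices reproducible.
random.seed(0)
data = [5, 1, 4, 2, 3]
quick_sort_random(data, 0, len(data))
assert data == [1, 2, 3, 4, 5]
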
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero-shot audio classification pipeline using a model like CLAP: it predicts the class of an audio waveform
    from candidate labels that you provide yourself.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
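
# A minimal usage sketch for the pipeline above, assuming a CLAP checkpoint
# such as "laion/clap-htsat-unfused" and a local wav file; both names are
# illustrative.
from transformers import pipeline

classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
predictions = classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])
print(predictions)  # [{"score": ..., "label": "dog barking"}, ...]
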
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, albert_config_file: str, pytorch_dump_path: str):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase__ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ :str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
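
# A quick sketch exercising the derived properties above; the numbers follow
# from the 24 kHz defaults (hop length = 8 * 5 * 4 * 2 = 320).
config = EncodecConfig()
print(config.frame_rate)      # 75 == ceil(24000 / 320)
print(config.num_quantizers)  # 32 == 1000 * 24.0 // (75 * 10)
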
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ =['''input_features''', '''attention_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=1_6000 , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
super().__init__(feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Extract mel-filter bank features using TorchAudio's Kaldi-compliance module."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        """Apply utterance-level cepstral mean and variance normalization."""
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize and prepare for the model one or several sequence(s) of raw speech."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
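
if __name__ == "__main__":
    # Illustrative sketch (added; not part of the original file): the kaldi-style
    # fbank extraction performed by `_extract_fbank_features` above, run on fake
    # audio. Assumes torchaudio is installed; the values are arbitrary.
    demo_waveform = np.random.randn(16_000).astype(np.float32)  # ~1 s at 16 kHz
    demo_tensor = torch.from_numpy(demo_waveform * (2**15)).unsqueeze(0)
    demo_fbank = ta_kaldi.fbank(demo_tensor, num_mel_bins=80, sample_frequency=16_000)
    print(demo_fbank.shape)  # -> (num_frames, 80)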
| 408 | 1 |
"""Convert XLNet checkpoints from TensorFlow to PyTorch."""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
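# Example invocation (illustrative only; the script filename and paths below are
# hypothetical, not taken from the file itself):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/xlnet/model.ckpt \
#       --xlnet_config_file /tmp/xlnet/config.json \
#       --pytorch_dump_folder_path /tmp/xlnet-pytorch \
#       --finetuning_task sst-2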
| 692 |
"""Join a list of strings with a separator."""


def join(separator: str, separated: list[str]) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
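    # Illustrative checks (added for clarity; not part of the original doctests):
    assert join("", ["a", "b", "c", "d"]) == "abcd"
    assert join("#", ["a", "b", "c"]) == "a#b#c"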
| 692 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
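
if __name__ == "__main__":
    # Illustrative sketch (added; not in the original tests): the color-clustering
    # idea behind ImageGPT's image processor -- each pixel is replaced by the id
    # of its nearest cluster, turning an image into a token sequence.
    demo_clusters = np.asarray([[0.9, 0.7, 0.4], [-0.6, 0.0, 0.5]])  # toy palette
    demo_pixels = np.asarray([[0.8, 0.6, 0.3], [-0.5, 0.1, 0.6]])
    demo_distances = ((demo_pixels[:, None, :] - demo_clusters[None, :, :]) ** 2).sum(-1)
    print(demo_distances.argmin(axis=1))  # -> [0 1]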
| 2 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
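# Illustrative usage (added; assumes torch is installed): with the lazy module in
# place, both imports below resolve on first use without importing torch at
# package-import time.
#   from transformers import BioGptModel
#   from transformers.models.biogpt import BioGptTokenizer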
| 2 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__A : str = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__A : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase)
self.assertListEqual(encoding.boxes , _UpperCAmelCase)
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
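
# Illustrative usage sketch (added; the local file path below is hypothetical):
#   processor = LayoutLMvaImageProcessor(apply_ocr=True)
#   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
#   `encoding.words` and `encoding.boxes` then hold the Tesseract OCR output.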
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def __lowercase (_SCREAMING_SNAKE_CASE :str = "laptop" ):
SCREAMING_SNAKE_CASE : str = F'''https://www.amazon.in/laptop/s?k={product}'''
SCREAMING_SNAKE_CASE : int = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
SCREAMING_SNAKE_CASE : Dict = BeautifulSoup(requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).text )
# Initialize a Pandas dataframe with the column titles
SCREAMING_SNAKE_CASE : str = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
SCREAMING_SNAKE_CASE : Optional[int] = item.ha.text
SCREAMING_SNAKE_CASE : Tuple = '''https://www.amazon.in/''' + item.ha.a['''href''']
SCREAMING_SNAKE_CASE : Any = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
SCREAMING_SNAKE_CASE : Optional[int] = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
SCREAMING_SNAKE_CASE : Any = '''Not available'''
try:
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
SCREAMING_SNAKE_CASE : List[str] = ''''''
try:
SCREAMING_SNAKE_CASE : Optional[Any] = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 1_00 )
except ValueError:
SCREAMING_SNAKE_CASE : Optional[Any] = float('''nan''' )
except AttributeError:
pass
SCREAMING_SNAKE_CASE : int = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
SCREAMING_SNAKE_CASE : int = ''' '''
SCREAMING_SNAKE_CASE : List[str] = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
snake_case_ = """headphones"""
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
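    # Illustrative (added): the Discount column is computed as
    # (MRP - price) / MRP * 100, e.g. an MRP of 1000 with a current price of 750
    # yields a discount of 25.0.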
| 507 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 212 |
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
| 212 | 1 |
def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been visited yet.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and used to store the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
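
# Quick illustrative check (added): on a trivial 2-node network with a single
# edge of capacity 7, the maximum flow is 7.
assert ford_fulkerson([[0, 7], [0, 0]], 0, 1) == 7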
| 269 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_snake_case = {'input_ids': [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_snake_case,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 495 | 0 |
"""simple docstring"""
import numpy
# List of input, output pairs
UpperCamelCase = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
UpperCamelCase = (((515, 22, 13), 555), ((61, 35, 49), 150))
UpperCamelCase = [2, 4, 1, 5]
UpperCamelCase = len(train_data)
UpperCamelCase = 0.009
def lowerCAmelCase ( UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any="train" ) -> List[str]:
'''simple docstring'''
return calculate_hypothesis_value(UpperCamelCase_ , UpperCamelCase_ ) - output(
UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase ( UpperCamelCase_: Dict ) -> Optional[Any]:
'''simple docstring'''
_a = 0
for i in range(len(UpperCamelCase_ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def lowerCAmelCase ( UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def lowerCAmelCase ( UpperCamelCase_: Dict , UpperCamelCase_: Tuple ) -> List[Any]:
'''simple docstring'''
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def lowerCAmelCase ( UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int]=m ) -> Optional[Any]:
'''simple docstring'''
_a = 0
for i in range(UpperCamelCase_ ):
if index == -1:
summation_value += _error(UpperCamelCase_ )
else:
summation_value += _error(UpperCamelCase_ ) * train_data[i][0][index]
return summation_value
def lowerCAmelCase ( UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_a = summation_of_cost_derivative(UpperCamelCase_ , UpperCamelCase_ ) / m
return cost_derivative_value
def lowerCAmelCase ( ) -> List[str]:
'''simple docstring'''
global parameter_vector
# Tune these values to set a tolerance value for predicted output
_a = 0.00_0002
_a = 0
_a = 0
while True:
j += 1
_a = [0, 0, 0, 0]
for i in range(0 , len(UpperCamelCase_ ) ):
_a = get_cost_derivative(i - 1 )
_a = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
UpperCamelCase_ , UpperCamelCase_ , atol=UpperCamelCase_ , rtol=UpperCamelCase_ , ):
break
_a = temp_parameter_vector
print(("Number of iterations:", j) )
def lowerCAmelCase ( ) -> Any:
'''simple docstring'''
for i in range(len(UpperCamelCase_ ) ):
print(("Actual output value:", output(UpperCamelCase_ , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(UpperCamelCase_ , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
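    # For reference (added): with the initial parameter_vector [2, 4, 1, 5], the
    # hypothesis for input (5, 2, 3) is 2 + 4*5 + 1*2 + 5*3 = 39 (target: 15).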
| 612 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
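
# Illustrative usage (added; assumes PIL and the model weights are available,
# and the caption shown is only an example output):
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.jpg"))  # e.g. "a dog sitting on a couch"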
| 612 | 1 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 76 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
| 684 | 0 |
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
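
# Quick illustrative checks (added, complementing the unit tests below):
assert is_prime(97) and not is_prime(91)  # 91 = 7 * 13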
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main() | 549 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
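# Note: the mapping above only covers tensors that can be renamed 1:1; fused
# tensors (the qkv projections handled in read_in_q_k_v below) and permuted
# tensors (the patch-merging weights handled by the unfold helpers below) need
# dedicated treatment.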
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v(state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
            in_proj_bias = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[: dim]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[
                -dim :, :
            ]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim :]
            # fmt: on
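# Note: the fused qkv matrix is laid out as [query; key; value] along dim 0,
# which is why the three weight slices above are [:dim, :], [dim : dim * 2, :]
# and [-dim:, :].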
def correct_unfold_reduction_order(x ):
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def reverse_correct_unfold_reduction_order(x ):
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def correct_unfold_norm_order(x ):
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order(x ):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
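# Illustrative self-check (not part of the conversion logic; the function name
# is hypothetical): each "reverse_" helper above undoes its "correct_"
# counterpart, because the permutation [0, 2, 1, 3] is its own inverse.
def _unfold_roundtrip_demo():
    toy_weight = torch.arange(24 , dtype=torch.float32 ).reshape(3 , 8 )  # (out_channel, in_channel), in_channel divisible by 4
    assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(toy_weight ) ) , toy_weight )
    toy_norm = torch.arange(8 , dtype=torch.float32 )
    assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(toy_norm ) ) , toy_norm )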
def convert_upernet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ):
    model_name_to_url = {
        'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
        'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
        'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
        'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' , file_name=model_name )[
        'state_dict'
    ]
    for name, param in state_dict.items():
        print(name , param.shape )
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace('bn' , 'batch_norm' )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value )
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value )
    model.load_state_dict(state_dict )
    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors='pt' ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print(logits.shape )
    print('First values of logits:' , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
    print('Logits:' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"""Pushing model and processor for {model_name} to hub""" )
        model.push_to_hub(F"""openmmlab/{model_name}""" )
        processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[F"""upernet-swin-{size}""" for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 67 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A_ :
"""simple docstring"""
def __init__( self : Dict ,__A : Any ,__A : Tuple=None ,__A : Optional[int]=None ,__A : Optional[int]=None ,__A : int="resnet50" ,__A : int=3 ,__A : List[Any]=32 ,__A : Tuple=3 ,__A : List[Any]=True ,__A : Tuple=True ,) -> Any:
_lowercase = parent
_lowercase = out_indices if out_indices is not None else [4]
_lowercase = stage_names
_lowercase = out_features
_lowercase = backbone
_lowercase = batch_size
_lowercase = image_size
_lowercase = num_channels
_lowercase = use_pretrained_backbone
_lowercase = is_training
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase = self.get_config()
return config, pixel_values
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def __UpperCAmelCase ( self : Any ,__A : Any ,__A : Dict ) -> Union[str, Any]:
_lowercase = TimmBackbone(config=__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase = model(__A )
self.parent.assertEqual(
result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
_lowercase = self.prepare_config_and_inputs()
_lowercase , _lowercase = config_and_inputs
_lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : str ) -> Optional[int]:
_lowercase = TimmBackboneModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
def __UpperCAmelCase ( self : int ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = 'resnet18'
_lowercase = 'microsoft/resnet-18'
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
_lowercase = AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
_lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : int ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __UpperCAmelCase ( self : Any ) -> Any:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
_lowercase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase = self.all_model_classes[0]
_lowercase = model_class(__A )
model.to(__A )
_lowercase = self._prepare_for_class(__A ,__A )
_lowercase = model(**__A )
_lowercase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase = copy.deepcopy(__A )
_lowercase = None
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
_lowercase = copy.deepcopy(__A )
_lowercase = False
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A ) | 67 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
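# Illustrative usage: floats_list((2, 3)) returns a 2x3 nested list of random
# floats in [0.0, scale), drawn from the module-level RNG above.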
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=2_0_0_0 , __SCREAMING_SNAKE_CASE=2_0_4_8 , __SCREAMING_SNAKE_CASE=1_2_8 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_4_1_0_0 , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = parent
UpperCamelCase__ : Union[str, Any] = batch_size
UpperCamelCase__ : Optional[int] = min_seq_length
UpperCamelCase__ : Optional[Any] = max_seq_length
UpperCamelCase__ : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ : Optional[Any] = spectrogram_length
UpperCamelCase__ : Dict = feature_size
UpperCamelCase__ : Optional[Any] = num_audio_channels
UpperCamelCase__ : Union[str, Any] = hop_length
UpperCamelCase__ : str = chunk_length
UpperCamelCase__ : str = sampling_rate
def __SCREAMING_SNAKE_CASE ( self ) -> int:
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
"""simple docstring"""
def _flatten(__SCREAMING_SNAKE_CASE ):
return list(itertools.chain(*__SCREAMING_SNAKE_CASE ) )
if equal_length:
UpperCamelCase__ : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ : Optional[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase__ : Dict = [np.asarray(__SCREAMING_SNAKE_CASE ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TvltFeatureExtractor
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Any = TvltFeatureExtractionTester(self )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''spectrogram_length''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''feature_size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''num_audio_channels''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''hop_length''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''chunk_length''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''sampling_rate''' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ : Optional[Any] = feat_extract_first.save_pretrained(__SCREAMING_SNAKE_CASE )[0]
check_json_file_has_correct_format(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = self.feature_extraction_class.from_pretrained(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = feat_extract_first.to_dict()
UpperCamelCase__ : List[str] = feat_extract_second.to_dict()
UpperCamelCase__ : Optional[int] = dict_first.pop('''mel_filters''' )
UpperCamelCase__ : List[Any] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , '''feat_extract.json''' )
feat_extract_first.to_json_file(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = self.feature_extraction_class.from_json_file(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = feat_extract_first.to_dict()
UpperCamelCase__ : Optional[int] = feat_extract_second.to_dict()
UpperCamelCase__ : Optional[int] = dict_first.pop('''mel_filters''' )
UpperCamelCase__ : Dict = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ : Optional[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase__ : Optional[int] = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase__ : Optional[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
UpperCamelCase__ : Union[str, Any] = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
UpperCamelCase__ : List[Any] = feature_extractor(
__SCREAMING_SNAKE_CASE , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=__SCREAMING_SNAKE_CASE ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ : Union[str, Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
UpperCamelCase__ : Optional[Any] = np.asarray(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : int = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
UpperCamelCase__ : int = ds.sort('''id''' ).select(range(__SCREAMING_SNAKE_CASE ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : List[Any] = self._load_datasamples(1 )
UpperCamelCase__ : Optional[int] = TvltFeatureExtractor()
UpperCamelCase__ : Any = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
UpperCamelCase__ : Tuple = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 462 |
lowerCamelCase ={"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
lowerCamelCase =["a", "b", "c", "d", "e"]
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase__ : str = start
# add current to visited
visited.append(UpperCamelCase__ )
UpperCamelCase__ : int = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
UpperCamelCase__ : int = topological_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# if all neighbors visited add current to sort
sort.append(UpperCamelCase__ )
# if all vertices haven't been visited select a new one to visit
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
for vertice in vertices:
if vertice not in visited:
UpperCamelCase__ : Optional[int] = topological_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# return sort
return sort
if __name__ == "__main__":
lowerCamelCase =topological_sort("a", [], [])
print(sort)
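    # For the sample graph above this prints a children-first ordering,
    # ['c', 'd', 'e', 'b', 'a']; reversed, every vertex precedes the vertices
    # it points to.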
| 462 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__snake_case :Dict ={'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
__snake_case :Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 106 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( _lowerCamelCase , unittest.TestCase ):
A_ : Dict = GPTSanJapaneseTokenizer
A_ : Optional[int] = False
A_ : List[str] = {'do_clean_text': False, 'add_prefix_space': False}
def __UpperCamelCase ( self : Tuple ) -> Any:
super().setUp()
# fmt: off
A = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
A = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
A = {'unk_token': '<unk>'}
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(__UpperCamelCase ) )
def __UpperCamelCase ( self : Any , **__UpperCamelCase : Any ) -> Tuple:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def __UpperCamelCase ( self : str , __UpperCamelCase : Dict ) -> List[str]:
A = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
A = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def __UpperCamelCase ( self : str , __UpperCamelCase : Optional[Any] ) -> List[str]:
A , A = self.get_input_output_texts(__UpperCamelCase )
A = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
A = tokenizer.decode(__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase )
return text, ids
def __UpperCamelCase ( self : Any ) -> int:
pass # TODO add if relevant
def __UpperCamelCase ( self : List[str] ) -> Tuple:
pass # TODO add if relevant
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
pass # TODO add if relevant
def __UpperCamelCase ( self : List[str] ) -> int:
A = self.get_tokenizer()
# Testing tokenization
A = 'こんにちは、世界。 こんばんは、㔺界。'
A = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
A = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
# Testing conversion to ids without special tokens
A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
A = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
# Testing conversion to ids with special tokens
A = tokens + [tokenizer.unk_token]
A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
A = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
A = self.get_tokenizer()
# Testing tokenization
A = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
A = 'こんにちは、、、、世界。こんばんは、、、、世界。'
A = tokenizer.encode(__UpperCamelCase )
A = tokenizer.decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
A = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
A = 'こんにちは、世界。'
A = 'こんばんは、㔺界。😀'
A = 'こんにちは、世界。こんばんは、世界。😀'
A = tokenizer.encode(prefix_text + input_text )
A = tokenizer.encode('' , prefix_text=prefix_text + input_text )
A = tokenizer.encode(__UpperCamelCase , prefix_text=__UpperCamelCase )
A = tokenizer.decode(__UpperCamelCase )
A = tokenizer.decode(__UpperCamelCase )
A = tokenizer.decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
A = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
A = 'こんにちは、世界。'
A = 'こんばんは、㔺界。😀'
A = len(tokenizer.encode(__UpperCamelCase ) ) - 2
A = len(tokenizer.encode(__UpperCamelCase ) ) - 2
A = [1] + [0] * (len_prefix + len_text + 1)
A = [1] * (len_prefix + len_text + 1) + [0]
A = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
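        # Note: the three expected masks above presumably correspond to (1) the
        # whole string passed as text, (2) the whole string passed as prefix,
        # and (3) an explicit prefix/text split; a 1 appears to mark tokens on
        # the prefix side.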
A = tokenizer(prefix_text + input_text ).token_type_ids
A = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
A = tokenizer(__UpperCamelCase , prefix_text=__UpperCamelCase ).token_type_ids
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
@slow
def __UpperCamelCase ( self : Any ) -> str:
A = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
A = tokenizer.encode('あンいワ' )
A = tokenizer.encode('' , prefix_text='あンいワ' )
A = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(__UpperCamelCase ) , tokenizer.decode(__UpperCamelCase ) )
self.assertEqual(tokenizer.decode(__UpperCamelCase ) , tokenizer.decode(__UpperCamelCase ) )
self.assertNotEqual(__UpperCamelCase , __UpperCamelCase )
self.assertNotEqual(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __UpperCamelCase ( self : Any ) -> List[Any]:
A = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
A = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
A = tokenizer(__UpperCamelCase , padding=__UpperCamelCase )
A = tokenizer.batch_encode_plus(__UpperCamelCase , padding=__UpperCamelCase )
# fmt: off
A = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
A = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
A = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __UpperCamelCase )
self.assertListEqual(x_token.token_type_ids , __UpperCamelCase )
self.assertListEqual(x_token.attention_mask , __UpperCamelCase )
self.assertListEqual(x_token_a.input_ids , __UpperCamelCase )
self.assertListEqual(x_token_a.token_type_ids , __UpperCamelCase )
self.assertListEqual(x_token_a.attention_mask , __UpperCamelCase )
def __UpperCamelCase ( self : Tuple ) -> str:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __UpperCamelCase ( self : Dict ) -> int:
# tokenizer has no padding token
pass | 106 | 1 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(possible_board ,diagonal_right_collisions ,diagonal_left_collisions ,boards ,n ,) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board )
    # If row is equal to the size of the board, it means there is a queen in each
    # row of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
        return
    # We iterate each column in the row to find all possible results in each row
    for col in range(n ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this value, because a repeated
        # value means a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist
        # in their respective variables (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we
        # continue to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] ,[*diagonal_right_collisions, row - col] ,[*diagonal_left_collisions, row + col] ,boards ,n ,)
def n_queens_solution(n ) -> None:
    boards = []
    depth_first_search([] ,[] ,[] ,boards ,n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print('' )
    print(len(boards ) ,'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
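    # The classic 4-queens puzzle has exactly two solutions, so the call above
    # is expected to end by printing "2 solutions were found."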
| 384 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
'''simple docstring'''
@staticmethod
def UpperCamelCase( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
pass
def hashimage(image ) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable(mask ) -> Dict:
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
SCREAMING_SNAKE_CASE_ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = MaskGenerationPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
pass
@slow
@require_torch
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
lowerCamelCase_ = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_967},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_909},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_879},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_834},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_716},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_612},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_552},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_532},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_499},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_483},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_408},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_335},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_326},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_262},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_986},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_984},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_873},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = 'facebook/sam-vit-huge'
lowerCamelCase_ = pipeline('mask-generation' , model=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_210},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_053},
] , )
| 384 | 1 |
"""simple docstring"""
import torch
from transformers import AutoModel
class __UpperCAmelCase( torch.nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(snake_case__ , self ).__init__()
lowercase__ : Tuple= AutoModel.from_pretrained(snake_case__ , return_dict=snake_case__ )
lowercase__ : Dict= torch.nn.CosineSimilarity(3 , 1e-08 )
lowercase__ : Any= torch.nn.Softmax(dim=1 )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return self.bert(**snake_case__ ).last_hidden_state
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
return token_embeddings.sum(2 , keepdim=snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(snake_case__ , snake_case__ ) )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : List[str]= W_supports["sizes"].tolist()
lowercase__ : int= W_supports["start_token_id"].item()
lowercase__ : List[Any]= W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowercase__ : Union[str, Any]= self.BERT(**snake_case__ )
lowercase__ : int= self.BERT(**snake_case__ )
lowercase__ : Tuple= None
lowercase__ : Tuple= None
lowercase__ : Dict= W_supports["input_ids"] == start_token_id
lowercase__ : Tuple= W_supports["input_ids"] == end_token_id
for i, size in enumerate(snake_case__ ):
if i == 0:
lowercase__ : Optional[int]= 0
else:
lowercase__ : Optional[Any]= support_sizes[i - 1]
lowercase__ : Any= S[s : s + size][start_token_masks[s : s + size]]
lowercase__ : Any= S[s : s + size][end_token_masks[s : s + size]]
lowercase__ : str= torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowercase__ : str= torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowercase__ : Any= torch.vstack((p_starts, p_start) )
lowercase__ : Any= torch.vstack((p_ends, p_end) )
else:
lowercase__ : Any= p_start
lowercase__ : List[Any]= p_end
return p_starts, p_ends
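# Illustrative sketch (a hypothetical helper, not part of the original model):
# the class above scores tokens with a temperature-scaled cosine similarity
# followed by a softmax, i.e. softmax(T * cos(q, s)). A standalone toy version:
def _toy_fsner_similarity(q , s , T=1 ):
    # assumed shapes: q is (batch, q_len, dim), s is (batch, s_len, dim)
    cos = torch.nn.CosineSimilarity(3 , 1e-08 )
    return torch.nn.Softmax(dim=1 )(T * cos(q.unsqueeze(2 ) , s.unsqueeze(1 ) ) )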
| 218 |
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F"""{len(upper_files)} files contain uppercase characters:""")
    print("""\n""".join(upper_files) + """\n""")
space_files = [file for file in filepaths if """ """ in file]
if space_files:
    print(F"""{len(space_files)} files contain space characters:""")
    print("""\n""".join(space_files) + """\n""")
hyphen_files = [file for file in filepaths if """-""" in file]
if hyphen_files:
    print(F"""{len(hyphen_files)} files contain hyphen characters:""")
    print("""\n""".join(hyphen_files) + """\n""")
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F"""{len(nodir_files)} files are not in a directory:""")
    print("""\n""".join(nodir_files) + """\n""")
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 218 | 1 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 275 | """simple docstring"""
import torch
from torch import nn
class a ( nn.Module ):
def __init__( self : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Tuple=False ):
super().__init__()
_UpperCAmelCase = n_token
_UpperCAmelCase = d_embed
_UpperCAmelCase = d_proj
_UpperCAmelCase = cutoffs + [n_token]
_UpperCAmelCase = [0] + self.cutoffs
_UpperCAmelCase = div_val
_UpperCAmelCase = self.cutoffs[0]
_UpperCAmelCase = len(self.cutoffs ) - 1
_UpperCAmelCase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_UpperCAmelCase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
_UpperCAmelCase = nn.Parameter(torch.zeros(self.n_clusters ) )
_UpperCAmelCase = nn.ModuleList()
_UpperCAmelCase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) )
else:
self.out_projs.append(__lowerCAmelCase )
self.out_layers.append(nn.Linear(__lowerCAmelCase , __lowerCAmelCase ) )
else:
for i in range(len(self.cutoffs ) ):
_UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) )
self.out_layers.append(nn.Linear(__lowerCAmelCase , r_idx - l_idx ) )
_UpperCAmelCase = keep_order
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
if proj is None:
_UpperCAmelCase = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
_UpperCAmelCase = nn.functional.linear(__lowerCAmelCase , proj.t().contiguous() )
_UpperCAmelCase = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Union[str, Any]=False ):
if labels is not None:
# Shift so that tokens < n predict n
_UpperCAmelCase = hidden[..., :-1, :].contiguous()
_UpperCAmelCase = labels[..., 1:].contiguous()
_UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
_UpperCAmelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
_UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
_UpperCAmelCase = labels != -100
_UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
_UpperCAmelCase = (
-nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
_UpperCAmelCase , _UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCAmelCase = self.out_layers[i].weight
_UpperCAmelCase = self.out_layers[i].bias
if i == 0:
_UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
if labels is None:
_UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
_UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
_UpperCAmelCase = 0
_UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
_UpperCAmelCase , _UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_UpperCAmelCase = (labels >= l_idx) & (labels < r_idx)
_UpperCAmelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_UpperCAmelCase = labels.index_select(0 , __lowerCAmelCase ) - l_idx
_UpperCAmelCase = head_logprob.index_select(0 , __lowerCAmelCase )
_UpperCAmelCase = hidden.index_select(0 , __lowerCAmelCase )
else:
_UpperCAmelCase = hidden
if i == 0:
if labels is not None:
_UpperCAmelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
_UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_UpperCAmelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
_UpperCAmelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_UpperCAmelCase = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , __lowerCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] ):
if self.n_clusters == 0:
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
_UpperCAmelCase , _UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCAmelCase = self.out_layers[i].weight
_UpperCAmelCase = self.out_layers[i].bias
if i == 0:
_UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
_UpperCAmelCase , _UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = head_logprob[:, -i] + tail_logprob_i
_UpperCAmelCase = logprob_i
return out
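# Illustrative sketch of the two-level factorization implemented above (sizes
# and the helper name are hypothetical): for a token w in tail cluster c,
#     log p(w | h) = log p(c | h) + log p(w | c, h)
def _adaptive_softmax_toy(head_logits , tail_logits , cluster_idx ):
    # head_logits: (batch, shortlist_size + n_clusters); tail_logits: (batch, tail_vocab)
    head_logprob = nn.functional.log_softmax(head_logits , dim=1 )
    tail_logprob = nn.functional.log_softmax(tail_logits , dim=1 )
    return head_logprob[:, cluster_idx, None] + tail_logprob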
| 275 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase (metaclass=__UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :str = ["torch", "torchsde"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''torch''', '''torchsde'''] )
@classmethod
def _snake_case ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['''torch''', '''torchsde'''] )
@classmethod
def _snake_case ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['''torch''', '''torchsde'''] )
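# Note: this placeholder mirrors the import surface of the real torch/torchsde
# backed class so that `requires_backends` raises an informative error whenever
# either backend is missing.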
| 586 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __magic_name__ ( __UpperCAmelCase ):
@staticmethod
@abstractmethod
def __snake_case ( snake_case__ : ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
raise NotImplementedError()
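# Illustrative usage (names hypothetical): a concrete command subclasses the
# ABC above, registers its argparse subcommand in the static method, and
# implements the second method as its entry point, e.g.:
#
#     class EchoCommand(BaseCLICommand):
#         @staticmethod
#         def register_subcommand(parser):
#             # `parser` is assumed to be an argparse sub-parsers action
#             echo = parser.add_parser("echo")
#             echo.set_defaults(func=lambda args: EchoCommand())
#         def run(self):
#             print("echo")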
| 677 | 0 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowercase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase_ = IFInpaintingPipeline
lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase_ = PipelineTesterMixin.required_optional_params - {'latents'}
def _UpperCamelCase ( self ) -> str:
return self._get_dummy_components()
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ) -> Optional[int]:
if str(UpperCAmelCase_ ).startswith('mps' ):
lowerCamelCase : Optional[int] = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCamelCase : str = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCamelCase : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCamelCase : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCamelCase : str = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _UpperCamelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _UpperCamelCase ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _UpperCamelCase ( self ) -> Dict:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _UpperCamelCase ( self ) -> Optional[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _UpperCamelCase ( self ) -> List[Any]:
self._test_save_load_local()
def _UpperCamelCase ( self ) -> Any:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
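# Illustrative aside: the mps branch in get_dummy_inputs-style helpers exists
# because torch.Generator historically lacked mps support, so tests fall back to
# seeding the global generator there. The pattern in isolation:
import torch

def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global default generator
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
print(torch.randn(2, generator=gen))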
| 133 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( __UpperCAmelCase , unittest.TestCase ):
lowercase_ = KandinskyInpaintPipeline
lowercase_ = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
lowercase_ = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
lowercase_ = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowercase_ = False
@property
def _UpperCamelCase ( self ) -> List[str]:
return 32
@property
def _UpperCamelCase ( self ) -> Any:
return 32
@property
def _UpperCamelCase ( self ) -> Any:
return self.time_input_dim
@property
def _UpperCamelCase ( self ) -> List[Any]:
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self ) -> str:
return 100
@property
def _UpperCamelCase ( self ) -> Optional[Any]:
lowerCamelCase : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def _UpperCamelCase ( self ) -> Any:
torch.manual_seed(0 )
lowerCamelCase : str = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowerCamelCase : List[Any] = MultilingualCLIP(UpperCAmelCase_ )
lowerCamelCase : List[Any] = text_encoder.eval()
return text_encoder
@property
def _UpperCamelCase ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCamelCase : List[Any] = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase : Tuple = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def _UpperCamelCase ( self ) -> List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCamelCase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
lowerCamelCase : Any = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase : Any = self.dummy_text_encoder
lowerCamelCase : List[str] = self.dummy_tokenizer
lowerCamelCase : int = self.dummy_unet
lowerCamelCase : Union[str, Any] = self.dummy_movq
lowerCamelCase : List[str] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type='epsilon' , thresholding=UpperCAmelCase_ , )
lowerCamelCase : List[Any] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ) -> Tuple:
lowerCamelCase : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCAmelCase_ )
# create init_image
lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCamelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase : Dict = Image.fromarray(np.uinta(UpperCAmelCase_ ) ).convert('RGB' ).resize((256, 256) )
# create mask
lowerCamelCase : Optional[Any] = np.ones((64, 64) , dtype=np.floataa )
lowerCamelCase : Union[str, Any] = 0
if str(UpperCAmelCase_ ).startswith('mps' ):
lowerCamelCase : Optional[int] = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCamelCase : Dict = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCamelCase : List[str] = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def _UpperCamelCase ( self ) -> List[str]:
lowerCamelCase : Any = 'cpu'
lowerCamelCase : List[Any] = self.get_dummy_components()
lowerCamelCase : List[str] = self.pipeline_class(**UpperCAmelCase_ )
lowerCamelCase : Tuple = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase : int = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
lowerCamelCase : int = output.images
lowerCamelCase : Union[str, Any] = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
lowerCamelCase : Any = image[0, -3:, -3:, -1]
lowerCamelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
lowerCamelCase : Dict = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def _UpperCamelCase ( self ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def _UpperCamelCase ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self ) -> List[str]:
lowerCamelCase : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
lowerCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase : Union[str, Any] = np.ones((768, 768) , dtype=np.floataa )
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : Optional[int] = 'a hat'
lowerCamelCase : Dict = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase_ )
lowerCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa )
lowerCamelCase : Optional[int] = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase , lowerCamelCase : Tuple = pipe_prior(
UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowerCamelCase : Optional[Any] = pipeline(
UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
lowerCamelCase : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
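# Illustrative assumption about the final check: assert_mean_pixel_difference
# presumably bounds the average absolute pixel error between the generated and
# reference images. A toy equivalent under that assumption:
import numpy as np

def toy_assert_mean_pixel_difference(image, expected, threshold=10):
    diff = np.abs(image.astype(np.float64) - expected.astype(np.float64)).mean()
    assert diff < threshold, f"mean pixel difference {diff} exceeds {threshold}"

a = np.zeros((8, 8, 3), dtype=np.uint8)
toy_assert_mean_pixel_difference(a, a + 3)  # passes: mean delta is 3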
| 133 | 1 |
"""simple docstring"""
def _snake_case ( principal : float , rate_per_annum : float , years_to_repay : int ) -> float:
    """simple docstring"""
    if principal <= 0:
        raise Exception("""Principal borrowed must be > 0""" )
    if rate_per_annum < 0:
        raise Exception("""Rate of interest must be >= 0""" )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception("""Years to repay must be an integer > 0""" )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
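# Worked check (computed here, not quoted from the source): borrowing 25,000 at
# 8% per annum for 5 years with EMI = P*r*(1+r)**n / ((1+r)**n - 1).
p, r, n = 25_000, 0.08 / 12, 5 * 12
emi = p * r * (1 + r) ** n / ((1 + r) ** n - 1)
print(round(emi, 2))  # ~506.91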
| 88 | import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class a__ ( __SCREAMING_SNAKE_CASE ):
_A = "Wav2Vec2FeatureExtractor"
_A = "AutoTokenizer"
def __init__( self : Any , A_ : Union[str, Any] , A_ : List[str] ) -> Tuple:
"""simple docstring"""
super().__init__(A_ , A_ )
lowerCamelCase_: str = self.feature_extractor
lowerCamelCase_: Optional[int] = False
@classmethod
def lowerCAmelCase ( cls : int , A_ : Dict , **A_ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
try:
return super().from_pretrained(A_ , **A_ )
except OSError:
warnings.warn(
f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
""" include a `tokenizer_class` attribute is deprecated and will be """
"""removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
""" attribute to either your `config.json` or `tokenizer_config.json` """
"""file to suppress this warning: """ , A_ , )
lowerCamelCase_: Tuple = WavaVecaFeatureExtractor.from_pretrained(A_ , **A_ )
lowerCamelCase_: List[str] = WavaVecaCTCTokenizer.from_pretrained(A_ , **A_ )
return cls(feature_extractor=A_ , tokenizer=A_ )
def __call__( self : List[str] , *A_ : Tuple , **A_ : Optional[Any] ) -> List[str]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A_ , **A_ )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
lowerCamelCase_: List[str] = kwargs.pop("""raw_speech""" )
else:
lowerCamelCase_: str = kwargs.pop("""audio""" , A_ )
lowerCamelCase_: str = kwargs.pop("""sampling_rate""" , A_ )
lowerCamelCase_: List[Any] = kwargs.pop("""text""" , A_ )
if len(A_ ) > 0:
lowerCamelCase_: List[str] = args[0]
lowerCamelCase_: Tuple = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
lowerCamelCase_: Dict = self.feature_extractor(A_ , *A_ , sampling_rate=A_ , **A_ )
if text is not None:
lowerCamelCase_: Tuple = self.tokenizer(A_ , **A_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase_: Any = encodings["""input_ids"""]
return inputs
def lowerCAmelCase ( self : Optional[Any] , *A_ : Optional[int] , **A_ : Tuple ) -> str:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*A_ , **A_ )
lowerCamelCase_: str = kwargs.pop("""input_features""" , A_ )
lowerCamelCase_: Optional[Any] = kwargs.pop("""labels""" , A_ )
if len(A_ ) > 0:
lowerCamelCase_: Tuple = args[0]
lowerCamelCase_: int = args[1:]
if input_features is not None:
lowerCamelCase_: Tuple = self.feature_extractor.pad(A_ , *A_ , **A_ )
if labels is not None:
lowerCamelCase_: Optional[int] = self.tokenizer.pad(A_ , **A_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowerCamelCase_: List[str] = labels["""input_ids"""]
return input_features
def lowerCAmelCase ( self : str , *A_ : int , **A_ : List[str] ) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*A_ , **A_ )
def lowerCAmelCase ( self : Any , *A_ : Union[str, Any] , **A_ : str ) -> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*A_ , **A_ )
@contextmanager
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
lowerCamelCase_: Union[str, Any] = True
lowerCamelCase_: int = self.tokenizer
yield
lowerCamelCase_: int = self.feature_extractor
lowerCamelCase_: Optional[Any] = False
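# Illustrative sketch of the dual-dispatch idea in __call__ above: audio goes to
# the feature extractor, text to the tokenizer, and the token ids become labels.
# The two callables below are stand-ins, not the real Wav2Vec2 components.
class ToyProcessor:
    def __init__(self, feature_extractor, tokenizer):
        self.feature_extractor, self.tokenizer = feature_extractor, tokenizer

    def __call__(self, audio=None, text=None, **kwargs):
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        inputs = self.feature_extractor(audio, **kwargs) if audio is not None else None
        encodings = self.tokenizer(text, **kwargs) if text is not None else None
        if text is None:
            return inputs
        if audio is None:
            return encodings
        inputs["labels"] = encodings["input_ids"]
        return inputs

proc = ToyProcessor(lambda a, **k: {"input_values": list(a)},
                    lambda t, **k: {"input_ids": [ord(c) for c in t]})
print(proc(audio=[0.1, 0.2], text="hi"))  # input_values plus labels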
| 423 | 0 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self : List[str] , snake_case__ : Tuple , snake_case__ : List[str]=1_3 , snake_case__ : Optional[Any]=7 , snake_case__ : Tuple=True , snake_case__ : Any=True , snake_case__ : Optional[int]=True , snake_case__ : Dict=True , snake_case__ : Optional[int]=9_9 , snake_case__ : Optional[Any]=2_4 , snake_case__ : Dict=2 , snake_case__ : List[str]=6 , snake_case__ : Optional[Any]=3_7 , snake_case__ : str="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : List[str]=0.1 , snake_case__ : Dict=5_1_2 , snake_case__ : List[str]=1_6 , snake_case__ : Optional[int]=2 , snake_case__ : Union[str, Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Optional[Any]=None , snake_case__ : Optional[int]=1_0_0_0 , ):
'''simple docstring'''
lowercase :Tuple = parent
lowercase :Optional[int] = batch_size
lowercase :Union[str, Any] = seq_length
lowercase :List[str] = is_training
lowercase :int = use_input_mask
lowercase :List[str] = use_token_type_ids
lowercase :Dict = use_labels
lowercase :Optional[int] = vocab_size
lowercase :Union[str, Any] = hidden_size
lowercase :Union[str, Any] = num_hidden_layers
lowercase :Optional[Any] = num_attention_heads
lowercase :Optional[int] = intermediate_size
lowercase :Optional[int] = hidden_act
lowercase :Optional[int] = hidden_dropout_prob
lowercase :Any = attention_probs_dropout_prob
lowercase :Union[str, Any] = max_position_embeddings
lowercase :str = type_vocab_size
lowercase :Optional[Any] = type_sequence_label_size
lowercase :Tuple = initializer_range
lowercase :List[str] = num_labels
lowercase :Union[str, Any] = scope
lowercase :Dict = range_bbox
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase :Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase :Optional[int] = bbox[i, j, 3]
lowercase :Any = bbox[i, j, 1]
lowercase :Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase :Optional[Any] = bbox[i, j, 2]
lowercase :Dict = bbox[i, j, 0]
lowercase :Any = t
lowercase :Dict = None
if self.use_input_mask:
lowercase :Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowercase :List[str] = None
if self.use_token_type_ids:
lowercase :int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase :List[str] = None
lowercase :Dict = None
if self.use_labels:
lowercase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase :Optional[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __snake_case ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Any , snake_case__ : Optional[Any] , ):
'''simple docstring'''
lowercase :List[str] = LiltModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase :Dict = model(snake_case__ , bbox=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
lowercase :int = model(snake_case__ , bbox=snake_case__ , token_type_ids=snake_case__ )
lowercase :int = model(snake_case__ , bbox=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __snake_case ( self : Dict , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Dict , ):
'''simple docstring'''
lowercase :int = self.num_labels
lowercase :List[str] = LiltForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase :Optional[int] = model(
snake_case__ , bbox=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , ):
'''simple docstring'''
lowercase :Tuple = LiltForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase :str = model(
snake_case__ , bbox=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : str ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__A : Tuple = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A : Tuple = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : Optional[Any] = False
__A : List[str] = False
def __snake_case ( self : List[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ):
'''simple docstring'''
return True
def __snake_case ( self : str ):
'''simple docstring'''
lowercase :str = LiltModelTester(self )
lowercase :List[Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __snake_case ( self : str ):
'''simple docstring'''
lowercase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __snake_case ( self : int ):
'''simple docstring'''
lowercase :Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase :Optional[Any] = type
self.model_tester.create_and_check_model(*snake_case__ )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
@slow
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase :Optional[int] = LiltModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
@slow
class __magic_name__ ( unittest.TestCase ):
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :List[Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(snake_case__ )
lowercase :List[Any] = torch.tensor([[1, 2]] , device=snake_case__ )
lowercase :Optional[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=snake_case__ )
# forward pass
with torch.no_grad():
lowercase :Optional[Any] = model(input_ids=snake_case__ , bbox=snake_case__ )
lowercase :Tuple = torch.Size([1, 2, 7_6_8] )
lowercase :Any = torch.tensor(
[[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=snake_case__ , )
self.assertTrue(outputs.last_hidden_state.shape , snake_case__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , snake_case__ , atol=1e-3 ) )
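# Illustrative aside: the per-element bbox fix-up loop in prepare_config_and_inputs
# (ensuring x0 <= x1 and y0 <= y1) can also be written vectorized. Toy boxes below:
import torch

bbox = torch.randint(0, 1000, (2, 5, 4))
x = bbox[..., [0, 2]].sort(dim=-1).values  # sort each (x0, x1) pair
y = bbox[..., [1, 3]].sort(dim=-1).values  # sort each (y0, y1) pair
bbox = torch.stack([x[..., 0], y[..., 0], x[..., 1], y[..., 1]], dim=-1)
assert (bbox[..., 2] >= bbox[..., 0]).all() and (bbox[..., 3] >= bbox[..., 1]).all()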
| 715 |
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
UpperCAmelCase = '''sshleifer/mar_enro_6_3_student'''
class __magic_name__ ( __UpperCAmelCase ):
def __snake_case ( self : int ):
'''simple docstring'''
super().setUp()
lowercase :Union[str, Any] = cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=snake_case__ , )
lowercase :int = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
def __snake_case ( self : Any ):
'''simple docstring'''
MarianMTModel.from_pretrained(snake_case__ )
@slow
@require_torch_gpu
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :str = {
'''$MAX_LEN''': 6_4,
'''$BS''': 6_4,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
lowercase :str = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
lowercase :Any = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
lowercase :Dict = bash_script.replace(snake_case__ , str(snake_case__ ) )
lowercase :Any = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowercase :Optional[Any] = f"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowercase :str = ['''finetune.py'''] + bash_script.split() + args
with patch.object(snake_case__ , '''argv''' , snake_case__ ):
lowercase :Optional[int] = argparse.ArgumentParser()
lowercase :List[Any] = pl.Trainer.add_argparse_args(snake_case__ )
lowercase :Union[str, Any] = SummarizationModule.add_model_specific_args(snake_case__ , os.getcwd() )
lowercase :int = parser.parse_args()
lowercase :int = main(snake_case__ )
# Check metrics
lowercase :int = load_json(model.metrics_save_path )
lowercase :Tuple = metrics['''val'''][0]
lowercase :Any = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , snake_case__ )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 1_7 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase :Any = os.listdir(snake_case__ )
lowercase :int = [x for x in contents if x.endswith('''.ckpt''' )][0]
lowercase :Any = os.path.join(args.output_dir , snake_case__ )
lowercase :int = torch.load(snake_case__ , map_location='''cpu''' )
lowercase :Any = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowercase :Optional[Any] = {os.path.basename(snake_case__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class __magic_name__ ( __UpperCAmelCase ):
@timeout_decorator.timeout(6_0_0 )
@slow
@require_torch_gpu
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :Union[str, Any] = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
lowercase :List[Any] = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 1_2_8,
'''$BS''': 1_6,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
lowercase :Dict = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
lowercase :Union[str, Any] = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
lowercase :Dict = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
lowercase :str = bash_script.replace(snake_case__ , str(snake_case__ ) )
lowercase :Union[str, Any] = self.get_auto_remove_tmp_dir()
lowercase :str = bash_script.replace('''--fp16''' , '''''' )
lowercase :Union[str, Any] = 6
lowercase :str = (
['''distillation.py''']
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"""--num_train_epochs={epochs}""",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(snake_case__ , '''argv''' , snake_case__ ):
lowercase :Optional[int] = argparse.ArgumentParser()
lowercase :Tuple = pl.Trainer.add_argparse_args(snake_case__ )
lowercase :Optional[Any] = SummarizationDistiller.add_model_specific_args(snake_case__ , os.getcwd() )
lowercase :Optional[int] = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowercase :Tuple = distill_main(snake_case__ )
# Check metrics
lowercase :Tuple = load_json(model.metrics_save_path )
lowercase :int = metrics['''val'''][0]
lowercase :List[str] = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , snake_case__ )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase :Union[str, Any] = os.listdir(snake_case__ )
lowercase :List[str] = [x for x in contents if x.endswith('''.ckpt''' )][0]
lowercase :Dict = os.path.join(args.output_dir , snake_case__ )
lowercase :Optional[int] = torch.load(snake_case__ , map_location='''cpu''' )
lowercase :str = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowercase :int = {os.path.basename(snake_case__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
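# Illustrative sketch of the test-harness trick used above: patching sys.argv so a
# script's argparse-based main() sees constructed CLI arguments. Toy main below:
import sys
from argparse import ArgumentParser
from unittest.mock import patch

def toy_main():
    parser = ArgumentParser()
    parser.add_argument("--learning_rate", type=float)
    return parser.parse_args().learning_rate  # reads the patched sys.argv

with patch.object(sys, "argv", ["finetune.py", "--learning_rate", "3e-4"]):
    assert toy_main() == 3e-4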
| 475 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
SCREAMING_SNAKE_CASE_: List[str] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Optional[int] ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
SCREAMING_SNAKE_CASE_: Any ={
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE_: int ={
'RUCAIBox/mvp': 10_24,
}
class __A ( UpperCamelCase__ ):
a__ : Optional[int] = VOCAB_FILES_NAMES
a__ : str = PRETRAINED_VOCAB_FILES_MAP
a__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Any = ["""input_ids""", """attention_mask"""]
a__ : List[Any] = MvpTokenizer
def __init__(self : Dict , __a : List[Any]=None , __a : List[Any]=None , __a : Optional[int]=None , __a : Any="replace" , __a : Optional[Any]="<s>" , __a : List[str]="</s>" , __a : int="</s>" , __a : Optional[int]="<s>" , __a : str="<unk>" , __a : str="<pad>" , __a : List[Any]="<mask>" , __a : Tuple=False , __a : str=True , **__a : Optional[int] , ):
super().__init__(
__a , __a , tokenizer_file=__a , errors=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , trim_offsets=__a , **__a , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __a ) != add_prefix_space:
UpperCAmelCase_ = getattr(__a , pre_tok_state.pop("type" ) )
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = pre_tok_class(**__a )
UpperCAmelCase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase_ = "post_processor"
UpperCAmelCase_ = getattr(self.backend_tokenizer , __a , __a )
if tokenizer_component_instance:
UpperCAmelCase_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase_ = tuple(state["sep"] )
if "cls" in state:
UpperCAmelCase_ = tuple(state["cls"] )
UpperCAmelCase_ = False
if state.get("add_prefix_space" , __a ) != add_prefix_space:
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = True
if state.get("trim_offsets" , __a ) != trim_offsets:
UpperCAmelCase_ = trim_offsets
UpperCAmelCase_ = True
if changes_to_apply:
UpperCAmelCase_ = getattr(__a , state.pop("type" ) )
UpperCAmelCase_ = component_class(**__a )
setattr(self.backend_tokenizer , __a , __a )
    @property
    def mask_token(self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token(self , value : str ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
def _lowercase (self : str , *__a : Dict , **__a : Optional[Any] ):
UpperCAmelCase_ = kwargs.get("is_split_into_words" , __a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__a , **__a )
def _lowercase (self : Optional[int] , *__a : Optional[int] , **__a : int ):
UpperCAmelCase_ = kwargs.get("is_split_into_words" , __a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*__a , **__a )
    def _lowercase (self : List[str] , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _lowercase (self : Tuple , token_ids_a : Any , token_ids_b : List[str]=None ):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def _lowercase (self : Tuple , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
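# Illustrative check of the BART/MVP-style special-token layout built above:
# a single sequence becomes <s> A </s>, a pair <s> A </s></s> B </s>. Ids made up.
bos, eos = 0, 2
single = [bos] + [10, 11] + [eos]
pair = single + [eos] + [20] + [eos]
print(single)  # [0, 10, 11, 2]
print(pair)    # [0, 10, 11, 2, 2, 20, 2]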
| 78 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class UpperCAmelCase ( __snake_case ):
lowercase = """efficientnet"""
def __init__( self : List[str] , __magic_name__ : int = 3 , __magic_name__ : int = 6_0_0 , __magic_name__ : float = 2.0 , __magic_name__ : float = 3.1 , __magic_name__ : int = 8 , __magic_name__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , __magic_name__ : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , __magic_name__ : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , __magic_name__ : List[int] = [] , __magic_name__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , __magic_name__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , __magic_name__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , __magic_name__ : float = 0.25 , __magic_name__ : str = "swish" , __magic_name__ : int = 2_5_6_0 , __magic_name__ : str = "mean" , __magic_name__ : float = 0.02 , __magic_name__ : float = 0.001 , __magic_name__ : float = 0.99 , __magic_name__ : float = 0.5 , __magic_name__ : float = 0.2 , **__magic_name__ : Any , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = width_coefficient
UpperCamelCase = depth_coefficient
UpperCamelCase = depth_divisor
UpperCamelCase = kernel_sizes
UpperCamelCase = in_channels
UpperCamelCase = out_channels
UpperCamelCase = depthwise_padding
UpperCamelCase = strides
UpperCamelCase = num_block_repeats
UpperCamelCase = expand_ratios
UpperCamelCase = squeeze_expansion_ratio
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dim
UpperCamelCase = pooling_type
UpperCamelCase = initializer_range
UpperCamelCase = batch_norm_eps
UpperCamelCase = batch_norm_momentum
UpperCamelCase = dropout_rate
UpperCamelCase = drop_connect_rate
UpperCamelCase = sum(__magic_name__ ) * 4
class UpperCAmelCase ( __snake_case ):
lowercase = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
return 1e-5
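# Illustrative aside (this helper is an assumption, not quoted from the modeling
# file): EfficientNet's compound scaling usually rounds width-scaled channel
# counts to a multiple of depth_divisor, never dropping below 90% of the target.
def round_filters(channels: int, width_coefficient: float, depth_divisor: int = 8) -> int:
    scaled = channels * width_coefficient
    new = max(depth_divisor, int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new < 0.9 * scaled:
        new += depth_divisor
    return new

print([round_filters(c, 2.0) for c in [32, 16, 24, 40]])  # [64, 32, 48, 80]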
| 386 | 0 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
@slow
def _a (self ):
'''simple docstring'''
lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
lowerCamelCase = AutoTokenizer.from_pretrained("google/mt5-small" )
lowerCamelCase = tokenizer("Hello there" , return_tensors="np" ).input_ids
lowerCamelCase = tokenizer("Hi I am" , return_tensors="np" ).input_ids
lowerCamelCase = shift_tokens_right(_a , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCamelCase = model(_a , decoder_input_ids=_a ).logits
lowerCamelCase = optax.softmax_cross_entropy(_a , onehot(_a , logits.shape[-1] ) ).mean()
lowerCamelCase = -(labels.shape[-1] * loss.item())
lowerCamelCase = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
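# Illustrative restatement of the score above in plain NumPy (toy logits, no model
# download): token-level cross-entropy, then -(seq_len * mean_loss).
import numpy as np

def log_softmax(x):
    x = x - x.max(axis=-1, keepdims=True)
    return x - np.log(np.exp(x).sum(axis=-1, keepdims=True))

logits = np.random.randn(1, 4, 7)   # (batch, seq_len, vocab)
labels = np.array([[1, 3, 0, 5]])
nll = -np.take_along_axis(log_softmax(logits), labels[..., None], axis=-1)
loss = nll.mean()                   # analogue of optax.softmax_cross_entropy(...).mean()
print(-(labels.shape[-1] * loss))   # mirrors the mtf_score line above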
| 712 |
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = []
lowerCamelCase = []
lowerCamelCase = {
"^": 3,
"*": 2,
"/": 2,
"%": 2,
"+": 1,
"-": 1,
} # Priority of each operator
lowerCamelCase = len(UpperCAmelCase__ ) if (len(UpperCAmelCase__ ) > 7) else 7
# Print table header for output
print(
"Symbol".center(8 ) , "Stack".center(UpperCAmelCase__ ) , "Postfix".center(UpperCAmelCase__ ) , sep=" | " , )
print("-" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(UpperCAmelCase__ ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(UpperCAmelCase__ ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(UpperCAmelCase__ ) == 0:
stack.append(UpperCAmelCase__ ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(UpperCAmelCase__ ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(UpperCAmelCase__ ) # push x to stack
print(
x.center(8 ) , ("".join(UpperCAmelCase__ )).ljust(UpperCAmelCase__ ) , ("".join(UpperCAmelCase__ )).ljust(UpperCAmelCase__ ) , sep=" | " , ) # Output in tabular format
while len(UpperCAmelCase__ ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
" ".center(8 ) , ("".join(UpperCAmelCase__ )).ljust(UpperCAmelCase__ ) , ("".join(UpperCAmelCase__ )).ljust(UpperCAmelCase__ ) , sep=" | " , ) # Output in tabular format
return "".join(UpperCAmelCase__ ) # return Postfix as str
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = list(infix[::-1] ) # reverse the infix equation
for i in range(len(UpperCAmelCase__ ) ):
if infix[i] == "(":
lowerCamelCase = ")" # change "(" to ")"
elif infix[i] == ")":
lowerCamelCase = "(" # change ")" to "("
return (infix_2_postfix("".join(UpperCAmelCase__ ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
a_ : str = input('\nEnter an Infix Equation = ') # Input an Infix equation
a_ : List[Any] = ''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)') | 484 | 0 |
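# Illustrative compact reference for the same shunting-yard conversion as the
# functions above (this version adds an explicit "(" guard in the pop loop):
def to_postfix(expr: str) -> str:
    prio = {"^": 3, "*": 2, "/": 2, "%": 2, "+": 1, "-": 1}
    stack, out = [], []
    for ch in expr:
        if ch.isalnum():
            out.append(ch)
        elif ch == "(":
            stack.append(ch)
        elif ch == ")":
            while stack[-1] != "(":
                out.append(stack.pop())
            stack.pop()
        else:
            while stack and stack[-1] != "(" and prio[ch] <= prio[stack[-1]]:
                out.append(stack.pop())
            stack.append(ch)
    return "".join(out + stack[::-1])

assert to_postfix("a+b*(c^d-e)") == "abcd^e-*+"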
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
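# Illustrative sketch (not the real _LazyModule): the lazy pattern above boils
# down to a PEP 562 module __getattr__ that imports a submodule on first access.
import importlib
import types

def make_lazy_module(name, import_structure):
    reverse = {attr: sub for sub, attrs in import_structure.items() for attr in attrs}
    module = types.ModuleType(name)

    def __getattr__(attr):
        if attr not in reverse:
            raise AttributeError(f"module {name!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{name}.{reverse[attr]}"), attr)
        setattr(module, attr, value)  # cache so the import only happens once
        return value

    module.__getattr__ = __getattr__
    return module

lazy = make_lazy_module("json", {"decoder": ["JSONDecoder"]})
print(lazy.JSONDecoder)  # json.decoder is imported only at this access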
| 532 |
"""simple docstring"""
class lowercase :
def __init__( self ) -> Any:
lowerCAmelCase = """"""
lowerCAmelCase = """"""
lowerCAmelCase = []
def _snake_case ( self , lowercase , lowercase ) -> int:
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
lowerCAmelCase = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
lowerCAmelCase = self.__min_dist_top_down_dp(lowercase , n - 1 )
lowerCAmelCase = self.__min_dist_top_down_dp(m - 1 , lowercase )
lowerCAmelCase = self.__min_dist_top_down_dp(m - 1 , n - 1 )
lowerCAmelCase = 1 + min(lowercase , lowercase , lowercase )
return self.dp[m][n]
def _snake_case ( self , lowercase , lowercase ) -> int:
lowerCAmelCase = worda
lowerCAmelCase = worda
lowerCAmelCase = [[-1 for _ in range(len(lowercase ) )] for _ in range(len(lowercase ) )]
return self.__min_dist_top_down_dp(len(lowercase ) - 1 , len(lowercase ) - 1 )
def _snake_case ( self , lowercase , lowercase ) -> int:
lowerCAmelCase = worda
lowerCAmelCase = worda
lowerCAmelCase = len(lowercase )
lowerCAmelCase = len(lowercase )
lowerCAmelCase = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
lowerCAmelCase = j
elif j == 0: # second string is empty
lowerCAmelCase = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
lowerCAmelCase = self.dp[i - 1][j - 1]
else:
lowerCAmelCase = self.dp[i][j - 1]
lowerCAmelCase = self.dp[i - 1][j]
lowerCAmelCase = self.dp[i - 1][j - 1]
lowerCAmelCase = 1 + min(lowercase , lowercase , lowercase )
return self.dp[m][n]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = EditDistance()
print("****************** Testing Edit Distance DP Algorithm ******************")
print()
SCREAMING_SNAKE_CASE__ = input("Enter the first string: ").strip()
SCREAMING_SNAKE_CASE__ = input("Enter the second string: ").strip()
print()
print(f'The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}')
print(f'The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}')
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 532 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( a : int , b : int ) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b) )[2:]
    max_len = max(len(a_binary) , len(b_binary) )
    return "0b" + "".join(
        str(int('1' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
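# Quick cross-check of the function above against Python's built-in | operator
# (int() accepts the "0b" prefix when base=2):
for x, y in [(25, 32), (37, 50), (7, 1)]:
    assert int(_lowerCAmelCase(x, y), 2) == x | y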
| 710 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase ( UpperCAmelCase_ ):
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.0_2 , __A=False , __A=True , __A="None" , __A=3 , __A=4 , __A=None , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_input_mask
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_labels
__UpperCAmelCase = num_choices
__UpperCAmelCase = relative_attention
__UpperCAmelCase = position_biased_input
__UpperCAmelCase = pos_att_type
__UpperCAmelCase = scope
def __lowerCamelCase ( self ):
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__UpperCAmelCase = None
if self.use_token_type_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __lowerCamelCase ( self , __A ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = DebertaVaModel(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A )[0]
__UpperCAmelCase = model(__A , token_type_ids=__A )[0]
__UpperCAmelCase = model(__A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = DebertaVaForMaskedLM(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = DebertaVaForSequenceClassification(__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__A )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = DebertaVaForTokenClassification(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = DebertaVaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = DebertaVaForMultipleChoice(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 617 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
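# Export-time sketch (the fixed dimensions below are the library defaults at the
# time of writing, quoted as an assumption): with both axes dynamic (-1), the text
# branch traces the graph with a (2, 8)-shaped "inputs" tensor, e.g.
#   onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)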
| 349 | 1 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits with the Chudnovsky algorithm.

    The series 1/pi = 12 * sum_{k>=0} (-1)^k (6k)! (13591409 + 545140134 k)
    / ((3k)! (k!)^3 640320^(3k + 3/2)) adds roughly 14 digits per term.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
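# For example, pi(10) returns "3.14159265": a single series term already gives
# more than 10 correct digits, so the loop body never runs for small precisions.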
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi are: {pi(n)}")
| 563 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowercase = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""DeiTFeatureExtractor"""]
__lowercase = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 563 | 1 |
"""simple docstring"""
def base16_encode(data: bytes) -> str:
    """Encode a bytes object into its uppercase base16 (hexadecimal) form."""
    # Turn each byte into its two-digit, zero-padded, uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hexadecimal) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
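# Round-trip sketch:
#   base16_encode(b"Hello")     -> "48656C6C6F"
#   base16_decode("48656C6C6F") -> b"Hello"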
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
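# A rough sketch of what `find_executable_batch_size` (used below) does: it
# reruns the decorated function, halving `batch_size` on every CUDA
# out-of-memory error (e.g. 256 -> 128 -> 64) until the function completes.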
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build train/eval DataLoaders for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 682 | 1 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase__: Tuple = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
UpperCamelCase__: Union[str, Any] = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
UpperCamelCase__: Optional[Any] = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 706 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            # Patience-based early exit: run the encoder layer by layer and stop once
            # `patience` consecutive internal classifiers agree on the prediction.
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
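# Minimal usage sketch (the checkpoint name is illustrative, not prescriptive):
#   model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased")
#   model.bert.set_patience(3)  # exit once 3 consecutive internal classifiers agree
#   model.bert.reset_stats()    # afterwards, model.bert.log_stats() reports the speed-up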
| 528 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    """Return True when every tensor in the list has the same shape."""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
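# e.g. check_same_shape([torch.zeros(2, 3), torch.ones(2, 3)])  -> True
#      check_same_shape([torch.zeros(2, 3), torch.zeros(3, 2)]) -> False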
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)
        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image_upscaling(self):
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )
        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
| 51 |
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Distance between two points as the sum of absolute coordinate differences."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same as `manhattan_distance`, written as a single generator expression."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
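# e.g. manhattan_distance([1, 1], [3, 4]) == 5.0  (|1 - 3| + |1 - 4|)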
if __name__ == "__main__":
import doctest
doctest.testmod()
| 517 | 0 |
def solution() -> int:
    """
    Project Euler problem 9: exactly one Pythagorean triplet (a, b, c) with
    a < b < c satisfies a + b + c = 1000; return the product a * b * c.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
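# The unique triplet is (a, b, c) = (200, 375, 425), so solution() == 31875000.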
if __name__ == "__main__":
print(F"{solution() = }")
| 716 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batch of differentiable standard pinhole cameras."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        # map pixel coordinates to [-1, 1]^2, then scale by tan(fov / 2) to get
        # the per-pixel offsets along the camera's x/y axes
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)
    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    # 20 cameras on a circle, all tilted slightly downwards and looking inwards.
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| 476 | 0 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
UpperCamelCase__ = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
UpperCamelCase__ = VideoClassificationPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , top_k=2 )
UpperCamelCase__ = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
for example in examples:
UpperCamelCase__ = video_classifier(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
{'''score''': ANY(SCREAMING_SNAKE_CASE_ ), '''label''': ANY(SCREAMING_SNAKE_CASE_ )},
{'''score''': ANY(SCREAMING_SNAKE_CASE_ ), '''label''': ANY(SCREAMING_SNAKE_CASE_ )},
] , )
@require_torch
def _a (self ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
UpperCamelCase__ = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
UpperCamelCase__ = pipeline(
'''video-classification''' , model=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , frame_sampling_rate=4 )
UpperCamelCase__ = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
UpperCamelCase__ = video_classifier(SCREAMING_SNAKE_CASE_ , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , )
UpperCamelCase__ = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def _a (self ) -> Dict:
'''simple docstring'''
pass
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape, params=None):
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params=None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
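# --- Illustrative sketch (not part of the original module) ---
# The flagging rule above boils down to: an image is flagged when any
# per-concept cosine similarity exceeds its learned per-concept threshold
# (plus a small adjustment when "special care" concepts fire). A tiny
# NumPy-only rendition with made-up numbers, purely for intuition:
def _nsfw_rule_sketch():
    import numpy as np

    cos_dist = np.array([[0.21, 0.05, 0.30]])  # similarity of one image to 3 concepts
    concept_weights = np.array([0.25, 0.10, 0.28])  # learned per-concept thresholds
    has_nsfw = np.any(np.round(cos_dist - concept_weights[None, :], 3) > 0, axis=1)
    return has_nsfw  # -> array([ True]) because 0.30 > 0.28 on the third concept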
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_UpperCAmelCase , )
UpperCAmelCase_ = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(_UpperCAmelCase )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowercase__ ( self : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any]=0 ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("RGB" ).resize((64, 64) )
UpperCAmelCase_ = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(_UpperCAmelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(_UpperCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
UpperCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = StableDiffusionInpaintPipeline(**_UpperCAmelCase )
UpperCAmelCase_ = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs(_UpperCAmelCase )
UpperCAmelCase_ = sd_pipe(**_UpperCAmelCase ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-2-inpainting"
UpperCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(_UpperCAmelCase , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "Face of a yellow cat, high resolution, sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-2-inpainting"
UpperCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "Face of a yellow cat, high resolution, sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
UpperCAmelCase_ = "stabilityai/stable-diffusion-2-inpainting"
UpperCAmelCase_ = PNDMScheduler.from_pretrained(_UpperCAmelCase , subfolder="scheduler" )
UpperCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , scheduler=_UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = "Face of a yellow cat, high resolution, sitting on a park bench"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
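# --- Illustrative usage sketch (not part of the original tests) ---
# A minimal, hypothetical end-to-end call mirroring what the slow tests above
# exercise. The checkpoint name and image URLs are taken from the tests, but
# the snippet itself is an assumption-laden sketch, not an official example.
def run_inpaint_example():
    import torch
    from diffusers import StableDiffusionInpaintPipeline
    from diffusers.utils import load_image

    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
    )
    mask_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
    )
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
    ).to("cuda")
    generator = torch.manual_seed(0)
    image = pipe(
        prompt="Face of a yellow cat, high resolution, sitting on a park bench",
        image=init_image,
        mask_image=mask_image,
        generator=generator,
    ).images[0]
    image.save("inpainted.png")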
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
def __init__( self : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=13 , _UpperCAmelCase : Any=64 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : Dict="swish" , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : int=32 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : int=10 , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Any=0.25 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Optional[int]=0.0 , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = make_divisible(512 * width_multiplier , divisor=8 )
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = conv_kernel_size
UpperCAmelCase_ = output_stride
UpperCAmelCase_ = classifier_dropout_prob
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
UpperCAmelCase_ = width_multiplier
UpperCAmelCase_ = ffn_dropout
UpperCAmelCase_ = attn_dropout
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowercase__ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileViTVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Any , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaModelTester(self )
UpperCAmelCase_ = MobileViTVaConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ):
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase_ = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = MobileViTVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
@cached_property
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
_UpperCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = model.to(_UpperCAmelCase )
UpperCAmelCase_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits
# verify the logits
UpperCAmelCase_ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = model.to(_UpperCAmelCase )
UpperCAmelCase_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits.detach().cpu()
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
UpperCAmelCase_ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
UpperCAmelCase_ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
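# --- Illustrative sketch (not part of the original tests) ---
# `make_divisible` (imported above) rounds a channel count to the nearest
# multiple of `divisor` without dropping below ~90% of the requested value.
# A hypothetical pure-Python rendition of that contract, for intuition only:
def make_divisible_sketch(value: float, divisor: int = 8, min_value: int = None) -> int:
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # avoid shrinking by more than ~10%
        new_value += divisor
    return new_value


assert make_divisible_sketch(512 * 0.25) == 128  # the width_multiplier = 0.25 case used above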
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
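# --- Illustrative sketch (not part of the original files) ---
# Both init files above follow the same pattern: populate `_import_structure`
# only with the backends that are installed, then swap the module object in
# `sys.modules` for a `_LazyModule` that resolves attributes on first access.
# A stripped-down, hypothetical version of that idea:
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # only called for attributes not found the normal way
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs only once per name
        return value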
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
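# --- Illustrative usage sketch (not part of the original module) ---
# Constructing the config with one override; the remaining fields keep the
# defaults defined above (hidden_size=1024, vocab_size=29056, ...).
if __name__ == "__main__":
    config = MegatronBertConfig(num_hidden_layers=12)
    assert config.hidden_size == 1024
    assert config.num_hidden_layers == 12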
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
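# --- Illustrative usage sketch (not part of the original module) ---
# `attribute_map` above lets framework code read the generic names even though
# this config stores them under T5-style names; PretrainedConfig translates
# the attribute access.
if __name__ == "__main__":
    config = GPTSanJapaneseConfig()
    assert config.hidden_size == config.d_model == 1024
    assert config.num_attention_heads == config.num_heads == 16
    assert config.num_hidden_layers == config.num_layers == 10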
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: larger batch sizes cause this test to time out, so only test small batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
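# --- Illustrative sketch (not part of the original tests) ---
# `assert_mean_pixel_difference` compares two image arrays by average absolute
# pixel error. A hypothetical minimal rendition of that check, with an assumed
# default threshold, for intuition only:
import numpy as _np


def mean_pixel_difference_sketch(image, expected, threshold=10):
    image = _np.asarray(image, dtype=_np.float32)
    expected = _np.asarray(expected, dtype=_np.float32)
    avg_diff = _np.abs(image - expected).mean()
    assert avg_diff < threshold, f"images too different, mean diff {avg_diff:.2f}"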
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
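# --- Illustrative usage sketch (not part of the original tests) ---
# `unwrap_schedule` simply records the learning rate over `num_steps` steps;
# running it against a linear warmup schedule makes the expected curves in the
# test table below easy to eyeball.
if __name__ == "__main__" and is_torch_available():
    _model = nn.Linear(4, 4)
    _optimizer = AdamW(_model.parameters(), lr=10.0)
    _scheduler = get_linear_schedule_with_warmup(_optimizer, num_warmup_steps=2, num_training_steps=10)
    print(unwrap_schedule(_scheduler, num_steps=10))
    # ramps 0.0 -> 10.0 over two steps, then decays linearly toward 0.0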
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
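# --- Illustrative usage sketch (not part of the original tests) ---
# LambdaLR schedules hold plain functions, which may not pickle; wrapping each
# lr_lambda in a class instance (as `wrap_scheduler` does above) lets
# `scheduler.state_dict()` round-trip through torch.save/torch.load.
def _pickle_roundtrip_sketch(scheduler):
    LambdaScheduleWrapper.wrap_scheduler(scheduler)
    state = scheduler.state_dict()  # now serializable via torch.save
    scheduler.load_state_dict(state)
    return scheduler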
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
def __init__( self : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : List[str]=13 ,lowerCamelCase__ : Optional[Any]=7 ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Dict=True ,lowerCamelCase__ : str=True ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : List[Any]=99 ,lowerCamelCase__ : Union[str, Any]=64 ,lowerCamelCase__ : Any=32 ,lowerCamelCase__ : List[Any]=5 ,lowerCamelCase__ : List[Any]=4 ,lowerCamelCase__ : Optional[int]=37 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : Union[str, Any]=0.1 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : Tuple=512 ,lowerCamelCase__ : Optional[Any]=16 ,lowerCamelCase__ : List[str]=2 ,lowerCamelCase__ : Any=0.0_2 ,lowerCamelCase__ : str=3 ,lowerCamelCase__ : Tuple=4 ,lowerCamelCase__ : int=None ,):
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_input_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = embedding_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = num_choices
UpperCAmelCase__ = scope
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Tuple ):
return MobileBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,)
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict ):
UpperCAmelCase__ = MobileBertModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
UpperCAmelCase__ = model(lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
UpperCAmelCase__ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = MobileBertForMaskedLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : int ,lowerCamelCase__ : Dict ,lowerCamelCase__ : int ,lowerCamelCase__ : Any ):
UpperCAmelCase__ = MobileBertForNextSentencePrediction(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(
lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict ):
UpperCAmelCase__ = MobileBertForPreTraining(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(
lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ ,next_sentence_label=lowerCamelCase__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Any ):
UpperCAmelCase__ = MobileBertForQuestionAnswering(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(
lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,start_positions=lowerCamelCase__ ,end_positions=lowerCamelCase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[Any] ):
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = MobileBertForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int ):
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = MobileBertForTokenClassification(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : str ):
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = MobileBertForMultipleChoice(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase__ = model(
lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained('google/mobilebert-uncased').to(torch_device)
        input_ids = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
                    [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
                    [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
                ]
            ], device=torch_device, )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
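# The ratio-based closeness check above generalizes to any tensors whose values
# span several orders of magnitude. A minimal standalone sketch of the idea (the
# helper name `assert_ratio_close` is hypothetical, not part of this test suite):
def assert_ratio_close(expected, actual, tolerance=1e-3):
    # Compare by ratio instead of absolute difference so huge magnitudes do not
    # dominate: every element of expected/actual must lie in [1 - tol, 1 + tol].
    ratio = expected / actual
    assert torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance)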
| 632 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tapas'] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_tapas'] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
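# The `_LazyModule` assignment above defers the heavy torch/TF imports until a
# symbol is first touched. A minimal sketch of the same idea with importlib
# (illustrative only; the real transformers implementation handles __dir__,
# pickling, and many more edge cases):
import importlib
import types


class SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"modeling_tapas": ["TapasModel", ...]} maps submodule -> symbols
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value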
| 632 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token='[UNK]',
        sep_token='[SEP]',
        pad_token='[PAD]',
        cls_token='[CLS]',
        mask_token='[MASK]',
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
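# Usage sketch for the two helpers above (assumes the checkpoint is reachable on
# the Hub): a single sequence encodes as [CLS] A [SEP] with token_type_ids all 0,
# and a pair as [CLS] A [SEP] B [SEP] with 1s over the second segment.
if __name__ == "__main__":
    tokenizer = SqueezeBertTokenizerFast.from_pretrained('squeezebert/squeezebert-uncased')
    encoding = tokenizer('hello there', 'general kenobi')
    print(encoding['input_ids'])       # ids for [CLS] hello there [SEP] general kenobi [SEP]
    print(encoding['token_type_ids'])  # 0s for the first segment, then 1s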
| 570 |
def count_inversions_bf(arr):
    """Count inversions with a brute-force O(n^2) double loop."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions in O(n log n) via merge sort; returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversions_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    merged, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    num_inversions = inversions_p + inversions_q + cross_inversions
    return merged, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists while counting inversions that cross the p/q boundary."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ', num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)


if __name__ == "__main__":
    main()
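# Worked check of the merge-step claim in _count_cross_inversions: with sorted
# halves p = [2, 4] and q = [1, 3], seeing p[0]=2 > q[0]=1 counts len(p) - 0 = 2
# cross inversions at once ((2, 1) and (4, 1)); later 4 > 3 adds one more, so the
# merge finds all 3 in O(len(p) + len(q)) instead of comparing every pair.
assert _count_cross_inversions([2, 4], [1, 3]) == ([1, 2, 3, 4], 3)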
| 570 | 1 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> List[str]:
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as input_file:
A = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
A = input_file.read()
A = regexp.search(lowerCamelCase_ )
return match
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Any:
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as input_file:
A = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" ,re.DOTALL )
A = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
A = regexp.finditer(lowerCamelCase_ )
A = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def UpperCamelCase__ ( self ) -> Optional[int]:
A = Path("""./datasets""" )
A = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowerCamelCase_ ) ):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' )
def UpperCamelCase__ ( self ) -> Optional[int]:
A = Path("""./datasets""" )
A = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(lowerCamelCase_ ) ):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
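# Tiny demonstration of what the encoding regex above accepts and rejects: a bare
# open(...) is flagged, while binary modes and explicit encodings are exempt
# (module-level sanity check, safe to delete):
_ENCODING_RE = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert _ENCODING_RE.search(' open("data.txt")')
assert not _ENCODING_RE.search(' open("data.txt", encoding="utf-8")')
assert not _ENCODING_RE.search(' open("data.bin", "rb")')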
| 255 |
"""simple docstring"""
UpperCAmelCase =256
# Modulus to hash a string
UpperCAmelCase =1_000_003
def _A ( _a : str , _a : str ):
"""simple docstring"""
A = len(_a )
A = len(_a )
if p_len > t_len:
return False
A = 0
A = 0
A = 1
# Calculating the hash of pattern and substring of text
for i in range(_a ):
A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
A = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
A = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
A = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _A ( ):
"""simple docstring"""
A = """abc1abc12"""
A = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
A = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(_a , _a ) and not rabin_karp(_a , _a )
# Test 2)
A = """ABABX"""
A = """ABABZABABYABABX"""
assert rabin_karp(_a , _a )
# Test 3)
A = """AAAB"""
A = """ABAAAAAB"""
assert rabin_karp(_a , _a )
# Test 4)
A = """abcdabcy"""
A = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(_a , _a )
# Test 5)
A = """Lü"""
A = """Lüsai"""
assert rabin_karp(_a , _a )
A = """Lue"""
assert not rabin_karp(_a , _a )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
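# Sanity check of the rolling-hash identity used above: dropping the leading
# character (weighted by modulus_power) and appending the next one must equal
# hashing the shifted window from scratch (module-level check, safe to delete):
def _hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h


_window, _next_char = "abc", "d"
_power = pow(alphabet_size, len(_window) - 1, modulus)
_rolled = ((_hash(_window) - ord(_window[0]) * _power) * alphabet_size + ord(_next_char)) % modulus
assert _rolled == _hash(_window[1:] + _next_char)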
| 255 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int):
    """Estimate pi by sampling uniform points in the unit square."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'The estimated value of pi is {pi_estimate}')
    print(f'The numpy value of pi is {pi}')
    print(f'The total error is {abs(pi - pi_estimate)}')


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {expected_value}')
    print(f'Total error is {abs(estimated_value - expected_value)}')
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {pi}')
    print(f'Total error is {abs(estimated_value - pi)}')
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
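# Quick sanity check of area_under_curve_estimator against a closed-form
# integral: the integral of x^2 on [0, 1] equals 1/3, so a large-sample Monte
# Carlo estimate should land nearby (stochastic, so only approximately):
if __name__ == "__main__":
    estimate = area_under_curve_estimator(100_000, lambda x: x * x, 0.0, 1.0)
    print(f'MC estimate of the integral of x^2 on [0, 1]: {estimate:.4f} (exact: {1 / 3:.4f})')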
| 140 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowercase : Optional[Any] = get_logger(__name__)
class A__ :
"""simple docstring"""
__A : List[str] = '''dummy_data'''
__A : List[str] = '''datasets'''
__A : List[str] = False
def __init__( self , lowercase , lowercase , lowercase , lowercase = None , lowercase = False , lowercase = True , lowercase = None , ) -> Tuple:
'''simple docstring'''
a__ : List[Any] = 0
a__ : Any = dataset_name
a__ : int = cache_dir
a__ : Union[str, Any] = use_local_dummy_data
a__ : int = config
# download_callbacks take a single url as input
a__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
a__ : int = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
a__ : Tuple = str(lowercase)
# to be downloaded
a__ : str = None
a__ : Optional[Any] = None
@property
def __lowercase ( self) -> List[str]:
'''simple docstring'''
if self._dummy_file is None:
a__ : List[Any] = self.download_dummy_data()
return self._dummy_file
@property
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name)
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name)
@property
def __lowercase ( self) -> int:
'''simple docstring'''
return os.path.join(self.dummy_data_folder , 'dummy_data.zip')
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : Tuple = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
a__ : Dict = cached_path(
lowercase , cache_dir=self.cache_dir , extract_compressed_file=lowercase , force_extract=lowercase)
return os.path.join(lowercase , self.dummy_file_name)
@property
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file)
@property
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
if self._bucket_url is None:
a__ : Any = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/'))
return self._bucket_url
@property
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
if os.path.isdir(self.dummy_file):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/').split('/')[:-1])
def __lowercase ( self , lowercase , *lowercase) -> str:
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
a__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
a__ : Tuple = self.dummy_file_name
# special case when data_url is a dict
if isinstance(lowercase , lowercase):
return self.create_dummy_data_dict(lowercase , lowercase)
elif isinstance(lowercase , (list, tuple)):
return self.create_dummy_data_list(lowercase , lowercase)
else:
return self.create_dummy_data_single(lowercase , lowercase)
def __lowercase ( self , lowercase , *lowercase) -> Dict:
'''simple docstring'''
return self.download_and_extract(lowercase)
def __lowercase ( self , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
return self.download_and_extract(lowercase)
def __lowercase ( self , lowercase , *lowercase , **lowercase) -> Optional[Any]:
'''simple docstring'''
return path
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
return {}
def __lowercase ( self , lowercase , lowercase) -> str:
'''simple docstring'''
a__ : Union[str, Any] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(lowercase , lowercase):
for single_url in single_urls:
download_callback(lowercase)
else:
a__ : Any = single_urls
download_callback(lowercase)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(lowercase , lowercase):
a__ : int = [os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase).name)) for x in single_urls]
else:
a__ : int = single_urls
a__ : Optional[int] = os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase).name))
a__ : Optional[Any] = value
# make sure that values are unique
if all(isinstance(lowercase , lowercase) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
dummy_data_dict.values()):
# append key to value to make its name unique
a__ : Dict = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __lowercase ( self , lowercase , lowercase) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
a__ : Optional[int] = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , lowercase)) for url in data_url)
a__ : List[str] = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed') for url in data_url)
if data_url and (is_tf_records or is_pubmed_records):
a__ : str = [data_url[0]] * len(lowercase)
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(lowercase)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
a__ : Tuple = os.path.join(lowercase , urllib.parse.quote_plus(single_url.split('/')[-1]))
dummy_data_list.append(lowercase)
return dummy_data_list
def __lowercase ( self , lowercase , lowercase) -> str:
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(lowercase)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
a__ : List[str] = os.path.join(lowercase , urllib.parse.quote_plus(data_url.split('/')[-1]))
if os.path.exists(lowercase) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __lowercase ( self) -> int:
'''simple docstring'''
pass
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
pass
def __lowercase ( self , lowercase) -> Dict:
'''simple docstring'''
def _iter_archive_members(lowercase):
# this preserves the order of the members inside the ZIP archive
a__ : Any = Path(self.dummy_file).parent
a__ : Any = path.relative_to(lowercase)
with ZipFile(self.local_path_to_dummy_data) as zip_file:
a__ : Tuple = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix()):
yield dummy_parent_path.joinpath(lowercase)
a__ : Optional[Any] = Path(lowercase)
a__ : List[str] = _iter_archive_members(lowercase) if self.use_local_dummy_data else path.rglob('*')
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__')):
yield file_path.relative_to(lowercase).as_posix(), file_path.open('rb')
def __lowercase ( self , lowercase) -> int:
'''simple docstring'''
if not isinstance(lowercase , lowercase):
a__ : List[Any] = [paths]
for path in paths:
if os.path.isfile(lowercase):
if os.path.basename(lowercase).startswith(('.', '__')):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(lowercase):
if os.path.basename(lowercase).startswith(('.', '__')):
continue
dirnames.sort()
for filename in sorted(lowercase):
if filename.startswith(('.', '__')):
continue
yield os.path.join(lowercase , lowercase)
| 302 | 0 |
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[int] ="M-CLIP"
def __init__( self , snake_case__=1_024 , snake_case__=768 , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = transformerDimSize
lowerCAmelCase : Optional[int] = imageDimSize
super().__init__(**snake_case__ )
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Tuple =MCLIPConfig
def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ):
"""simple docstring"""
super().__init__(snake_case__ , *snake_case__ , **snake_case__ )
lowerCAmelCase : Dict = XLMRobertaModel(snake_case__ )
lowerCAmelCase : Tuple = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.transformer(input_ids=snake_case__ , attention_mask=snake_case__ )[0]
lowerCAmelCase : str = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(snake_case__ ), embs
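# The forward pass above mean-pools token embeddings with the attention mask so
# padding positions contribute nothing. Standalone sketch of that pooling step:
def masked_mean_pool(embs: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # embs: (batch, seq_len, dim); attention_mask: (batch, seq_len) with 1 = real token
    summed = (embs * attention_mask.unsqueeze(2)).sum(dim=1)
    counts = attention_mask.sum(dim=1)[:, None]  # number of real tokens per example
    return summed / counts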
| 681 |
"""simple docstring"""
import math
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return math.sqrt(SCREAMING_SNAKE_CASE ) * math.sqrt(SCREAMING_SNAKE_CASE ) == num
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
lowerCAmelCase : List[str] = n
while left <= right:
lowerCAmelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowerCAmelCase : int = mid - 1
else:
lowerCAmelCase : int = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
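# Why keep the binary-search variant: it uses exact integer arithmetic, while the
# float-based check above can break down once math.sqrt loses precision for very
# large squares (function names follow this module; module-level spot check):
assert perfect_square_binary_search((10**10 + 7) ** 2)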
| 681 | 1 |
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(f'Downloading image from {url} ...')
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f'Done. Image saved to disk as {file_name}.')
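# Note: the og:image lookup above assumes the page declares Open Graph metadata,
# i.e. <meta property="og:image" content="https://.../image.jpg"> in its <head>.
# Pages without that tag make soup.find(...) return None, so subscripting it
# raises a TypeError; a production version would check for None first.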
| 64 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_speech_to_text'] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_speech_to_text'] = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_speech_to_text'] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_to_text'] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str:
_SCREAMING_SNAKE_CASE : Optional[Any] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_SCREAMING_SNAKE_CASE : Tuple = [144, 192, 240]
_SCREAMING_SNAKE_CASE : str = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
_SCREAMING_SNAKE_CASE : List[Any] = [96, 120, 144]
_SCREAMING_SNAKE_CASE : Optional[int] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [64, 80, 96]
_SCREAMING_SNAKE_CASE : Tuple = [16, 16, 24, 48, 64, 80, 320]
_SCREAMING_SNAKE_CASE : Any = 0.05
_SCREAMING_SNAKE_CASE : Tuple = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = 512
_SCREAMING_SNAKE_CASE : Optional[int] = 16
_SCREAMING_SNAKE_CASE : Optional[Any] = 21
_SCREAMING_SNAKE_CASE : Dict = """pascal-voc-id2label.json"""
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = 1_000
_SCREAMING_SNAKE_CASE : Tuple = """imagenet-1k-id2label.json"""
_SCREAMING_SNAKE_CASE : Tuple = """huggingface/label-files"""
_SCREAMING_SNAKE_CASE : Optional[Any] = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
_SCREAMING_SNAKE_CASE : Dict = {int(__A ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE : Tuple = idalabel
_SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str:
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
_SCREAMING_SNAKE_CASE : Tuple = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
_SCREAMING_SNAKE_CASE : Any = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
_SCREAMING_SNAKE_CASE : Tuple = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
_SCREAMING_SNAKE_CASE : Tuple = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
_SCREAMING_SNAKE_CASE : List[Any] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
_SCREAMING_SNAKE_CASE : List[Any] = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
_SCREAMING_SNAKE_CASE : str = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
_SCREAMING_SNAKE_CASE : List[Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
_SCREAMING_SNAKE_CASE : int = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
_SCREAMING_SNAKE_CASE : int = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if F""".global_rep.{i}.bias""" in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
_SCREAMING_SNAKE_CASE : Tuple = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
_SCREAMING_SNAKE_CASE : int = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
_SCREAMING_SNAKE_CASE : str = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
_SCREAMING_SNAKE_CASE : int = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
_SCREAMING_SNAKE_CASE : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
_SCREAMING_SNAKE_CASE : Any = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
_SCREAMING_SNAKE_CASE : Optional[Any] = """mobilevit.""" + name
return name
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> int:
if base_model:
_SCREAMING_SNAKE_CASE : Union[str, Any] = """"""
else:
_SCREAMING_SNAKE_CASE : Dict = """mobilevit."""
for key in orig_state_dict.copy().keys():
_SCREAMING_SNAKE_CASE : Optional[Any] = orig_state_dict.pop(__A )
if key[:8] == "encoder.":
_SCREAMING_SNAKE_CASE : str = key[8:]
if "qkv" in key:
_SCREAMING_SNAKE_CASE : List[str] = key.split(""".""" )
_SCREAMING_SNAKE_CASE : List[Any] = int(key_split[0][6:] ) - 1
_SCREAMING_SNAKE_CASE : str = int(key_split[3] )
_SCREAMING_SNAKE_CASE : Any = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
_SCREAMING_SNAKE_CASE : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
_SCREAMING_SNAKE_CASE : int = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
_SCREAMING_SNAKE_CASE : Union[str, Any] = val[:dim, :]
_SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :]
_SCREAMING_SNAKE_CASE : Any = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE : Any = val[:dim]
_SCREAMING_SNAKE_CASE : str = val[dim : dim * 2]
_SCREAMING_SNAKE_CASE : str = val[-dim:]
else:
_SCREAMING_SNAKE_CASE : List[str] = val
return orig_state_dict
def lowerCamelCase_()-> Any:
_SCREAMING_SNAKE_CASE : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_SCREAMING_SNAKE_CASE : Any = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> Dict:
_SCREAMING_SNAKE_CASE : List[str] = get_mobilevit_config(__A )
# load original state_dict
_SCREAMING_SNAKE_CASE : int = torch.load(__A , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
_SCREAMING_SNAKE_CASE : List[str] = MobileViTForSemanticSegmentation(__A ).eval()
else:
_SCREAMING_SNAKE_CASE : List[str] = MobileViTForImageClassification(__A ).eval()
_SCREAMING_SNAKE_CASE : List[str] = convert_state_dict(__A , __A )
model.load_state_dict(__A )
# Check outputs on an image, prepared by MobileViTImageProcessor
_SCREAMING_SNAKE_CASE : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_SCREAMING_SNAKE_CASE : Any = image_processor(images=prepare_img() , return_tensors="""pt""" )
_SCREAMING_SNAKE_CASE : Any = model(**__A )
_SCREAMING_SNAKE_CASE : Any = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
_SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , __A , atol=1e-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
_SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
_SCREAMING_SNAKE_CASE : Dict = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
_SCREAMING_SNAKE_CASE : List[str] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , __A , atol=1e-4 )
Path(__A ).mkdir(exist_ok=__A )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__A )
if push_to_hub:
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
_SCREAMING_SNAKE_CASE : Dict = model_mapping[mobilevit_name]
image_processor.push_to_hub(__A , organization="""apple""" )
model.push_to_hub(__A , organization="""apple""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
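# The qkv branch above splits one fused attention projection into separate
# query/key/value tensors by slicing the output dimension, mirroring the
# val[:dim], val[dim : dim * 2], val[-dim:] logic. Minimal standalone sketch:
def split_fused_qkv(qkv_weight, dim):
    # qkv_weight: tensor of shape (3 * dim, dim) holding q, k, v stacked in order
    query = qkv_weight[:dim, :]
    key = qkv_weight[dim : dim * 2, :]
    value = qkv_weight[-dim:, :]
    return query, key, value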
| 715 |
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 )-> str:
_SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = DatasetDict(
{
"""train""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ),
"""validation""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ),
"""test""": dataset["""validation"""],
} )
def tokenize_function(__SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_SCREAMING_SNAKE_CASE : str = datasets.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_SCREAMING_SNAKE_CASE : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_SCREAMING_SNAKE_CASE : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
_SCREAMING_SNAKE_CASE : Any = 8
else:
_SCREAMING_SNAKE_CASE : Optional[int] = None
return tokenizer.pad(
__SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , )
# Instantiate dataloaders.
_SCREAMING_SNAKE_CASE : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = DataLoader(
tokenized_datasets["""test"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader, test_dataloader
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
# New Code #
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
# Download the dataset
_SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""glue""" , """mrpc""" )
# Create our splits
_SCREAMING_SNAKE_CASE : Dict = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_SCREAMING_SNAKE_CASE : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_SCREAMING_SNAKE_CASE : Tuple = config["""lr"""]
_SCREAMING_SNAKE_CASE : Tuple = int(config["""num_epochs"""] )
_SCREAMING_SNAKE_CASE : int = int(config["""seed"""] )
_SCREAMING_SNAKE_CASE : int = int(config["""batch_size"""] )
_SCREAMING_SNAKE_CASE : List[str] = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
_SCREAMING_SNAKE_CASE : Any = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_SCREAMING_SNAKE_CASE : List[str] = batch_size // MAX_GPU_BATCH_SIZE
_SCREAMING_SNAKE_CASE : List[str] = MAX_GPU_BATCH_SIZE
set_seed(__SCREAMING_SNAKE_CASE )
# New Code #
# Create our folds:
_SCREAMING_SNAKE_CASE : List[str] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
_SCREAMING_SNAKE_CASE : Optional[Any] = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = get_fold_dataloaders(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_SCREAMING_SNAKE_CASE : Tuple = model.to(accelerator.device )
# Instantiate optimizer
_SCREAMING_SNAKE_CASE : int = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE )
# Instantiate scheduler
_SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup(
optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(__SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = outputs.loss
_SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , )
_SCREAMING_SNAKE_CASE : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE )
# New Code #
# We also run predictions on the test set at the very end
_SCREAMING_SNAKE_CASE : str = []
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 )
_SCREAMING_SNAKE_CASE : List[str] = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_SCREAMING_SNAKE_CASE : int = metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE )
accelerator.print("""Average test metrics from all folds:""" , __SCREAMING_SNAKE_CASE )
def lowerCamelCase_()-> Optional[Any]:
_SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" , type=__SCREAMING_SNAKE_CASE , default=3 , help="""The number of splits to perform across the dataset""" )
_SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
_SCREAMING_SNAKE_CASE : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
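# The final metric above ensembles the k folds by summing their test logits and
# taking the argmax, which is equivalent (up to the constant 1/k) to averaging
# the fold logits. Standalone sketch of that step:
def ensemble_fold_logits(fold_logits):
    # fold_logits: list of k tensors, each of shape (num_test_examples, num_labels)
    stacked = torch.stack(fold_logits, dim=0)  # (k, N, C)
    return stacked.sum(dim=0).div(len(fold_logits)).argmax(dim=-1)  # (N,) predicted labels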
| 635 | 0 |
"""simple docstring"""
def lowerCAmelCase_ ( UpperCamelCase__ : int ):
"""simple docstring"""
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ ) and number_of_steps > 0
), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
__lowercase , __lowercase = 1, 1
for _ in range(number_of_steps - 1 ):
__lowercase , __lowercase = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
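# climb_stairs follows the Fibonacci recurrence ways(n) = ways(n-1) + ways(n-2)
# with ways(1) = 1 and ways(2) = 2, computed in O(n) time and O(1) space.
# Module-level spot check (safe to delete):
assert [climb_stairs(n) for n in range(1, 7)] == [1, 2, 3, 5, 8, 13]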
| 616 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__ ( _a , _a , unittest.TestCase ):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 616 | 1 |
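# The slow test above reuses one set of loaded weights for three tasks by
# constructing new pipelines from an existing pipeline's ``components`` dict
# instead of reloading from disk. A minimal sketch of that pattern (same model
# id as the test; requires a GPU and network access):
import torch
from diffusers import IFImg2ImgPipeline, IFPipeline

pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
img2img_pipe = IFImg2ImgPipeline(**pipe.components)  # shares the unet/text encoder, no re-download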
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
def a_ ( self ):
pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
            ],
        )
@cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
# fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="google/reformer-crime-and-punishment", revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a", padding=False, sequences=sequences)
| 706 |
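# The assertions above all follow one round-trip pattern: tokenize into
# SentencePiece pieces, map pieces to ids, and map the ids back, with
# out-of-vocabulary pieces collapsing to <unk>. A minimal sketch using the
# same checkpoint as the slow tests (requires network access):
from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
tokens = tokenizer.tokenize("Hello World!")
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokens, ids)
print(tokenizer.convert_ids_to_tokens(ids))  # unknown pieces come back as <unk>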
"""
Project Euler problem 234: https://projecteuler.net/problem=234
Find the sum of all semidivisible numbers not exceeding 999966663333.
"""
import math
def prime_sieve(n: int) -> list:
    """Return all primes below ``n`` using an odd-only Sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
def solution(limit: int = 999_966_663_333) -> int:
    """Sum the semidivisible numbers below ``limit`` by scanning consecutive prime-square intervals."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
    print(solution())
| 564 | 0 |
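# A brute-force cross-check for solution() above, usable for small limits.
# A number n is semidivisible when exactly one of lps(n) and ups(n) divides it,
# where lps/ups are the largest prime <= sqrt(n) and the smallest prime >= sqrt(n).
# This is a minimal sketch (names are illustrative, not from the solution file):
import math

def is_prime(k: int) -> bool:
    return k > 1 and all(k % d for d in range(2, math.isqrt(k) + 1))

def semidivisible_sum_brute_force(limit: int) -> int:
    total = 0
    for n in range(4, limit + 1):
        r = math.isqrt(n)
        lps = next(p for p in range(r, 1, -1) if is_prime(p))
        ups_start = r if r * r == n else r + 1
        ups = next(p for p in range(ups_start, 2 * ups_start + 2) if is_prime(p))
        if (n % lps == 0) != (n % ups == 0):
            total += n
    return total

# e.g. semidivisible_sum_brute_force(1000) == 34825 (92 numbers, per the problem statement)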
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 86 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase , UpperCamelCase , UpperCamelCase = False, False, False
@dataclass
class Audio:
    """Audio [`Feature`] to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example(self, value, token_per_repo_id=None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. "
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. "
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 473 | 0 |
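# A minimal sketch of the Audio feature above in action: cast a string column
# of file paths to Audio so samples decode lazily on access (column name and
# file path here are illustrative):
from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["path/to/file.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}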
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model"}
_lowerCAmelCase : Tuple = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
_lowerCAmelCase : Any = {
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
_lowerCAmelCase : List[str] = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    """Adapted from CamembertTokenizer and BartTokenizer; a SentencePiece-based BARThez tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # assumption carried over from the original tokenizer: <mask> sits at the
        # end of the SentencePiece vocabulary
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 694 |
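# Sketch of the special-token layout built by build_inputs_with_special_tokens
# above: <s> A </s> for one sequence and <s> A </s></s> B </s> for a pair.
# The token id lists here are placeholders (requires network access):
from transformers import BarthezTokenizer

tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
ids = tok.build_inputs_with_special_tokens([10, 11], [20, 21])
assert ids == [tok.cls_token_id, 10, 11, tok.sep_token_id, tok.sep_token_id, 20, 21, tok.sep_token_id]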
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_CITATION = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 694 | 1 |
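# Worked example of the formula in the description: for references
# [0, 1, 0, 1, 0] and predictions [0, 0, 1, 1, 0] with positive class 1,
# TP = 1, FP = 1, FN = 1, so precision = recall = 0.5 and F1 = 0.5, matching
# the first docstring example.
precision = 1 / (1 + 1)
recall = 1 / (1 + 1)
f1 = 2 * (precision * recall) / (precision + recall)
assert f1 == 0.5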
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
a = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak prophetnet's weights to our prophetnet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 350 |
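# The conversion above splits fairseq's fused in_proj_weight (stacked q/k/v
# projections of shape (3*embed_dim, embed_dim)) into three separate linear
# layers. A minimal sketch of that slicing with random weights:
import torch

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
q_w = in_proj_weight[:embed_dim, :]
k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
v_w = in_proj_weight[2 * embed_dim :, :]
assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)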
"""Checks that the paths listed in utils/documentation_tests.txt exist and are sorted."""
import os

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 350 | 1 |
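# Quick illustration of the alphabetical-order check above: comparing against
# sorted() pinpoints the misplaced entries (paths are illustrative).
paths = ["docs/a.md", "docs/c.md", "docs/b.md"]
misplaced = [p for p, q in zip(paths, sorted(paths)) if p != q]
print(misplaced)  # ['docs/c.md', 'docs/b.md']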
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Formats a user-agent string with basic info about a request."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extracts the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name, variant=None):
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
return weights_name
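

# --- Editor's illustration (not part of the original file): `_add_variant` splices the
# variant tag just before the file extension.
assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert _add_variant("diffusion_pytorch_model.bin") == "diffusion_pytorch_model.bin"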
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
"this model name. Check the model page at "
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" ) | 381 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )
    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
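

# --- Editor's note (not part of the original test): the bookkeeping in the loop above
# follows the Decision Transformer convention rtg[t+1] = rtg[t] - reward[t]. A minimal
# standalone sketch of that recursion:
def _demo_returns_to_go(target_return: float, rewards: list) -> list:
    rtg = [target_return]
    for reward in rewards:
        rtg.append(rtg[-1] - reward)
    return rtg  # e.g. target 10.0 with rewards [1.0, 1.0] -> [10.0, 9.0, 8.0]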
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
UpperCamelCase_ : List[Any] = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase_ : Optional[int] = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
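
# --- Editor's illustration (not part of the original file): each pile keeps non-increasing
# tops, so `bisect_left` finds the leftmost pile whose top is >= the new element; reversing
# each pile yields sorted runs that `merge` combines.
def _demo_patience_sort() -> None:
    assert patience_sort([1, 9, 5, 21, 17, 6]) == [1, 5, 6, 9, 17, 21]
    assert patience_sort([]) == []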
| 115 |
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start=None, end=None) -> None:
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
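
# --- Editor's illustration (not part of the original file): slowsort is a deliberately
# pessimal "multiply and surrender" sort; its recurrence T(n) = 2*T(n/2) + T(n-1) + 1
# is super-polynomial, so it is only usable on tiny inputs.
def _demo_slowsort() -> None:
    sequence = [6, 4, 8, 2]
    slowsort(sequence)  # sorts in place
    assert sequence == [2, 4, 6, 8]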
| 115 | 1 |
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0

    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
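
# --- Editor's sketch (not part of the original file): the recursion above re-solves
# overlapping (max_weight, index) subproblems, so it is exponential in the worst case.
# A cached variant of the same logic visits each state once; the helper name is the
# editor's own, not part of the original module.
def knapsack_memoized(weights: list, values: list, number_of_items: int, max_weight: int) -> int:
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def best(remaining: int, index: int) -> int:
        if index == number_of_items:
            return 0
        skip = best(remaining, index + 1)
        take = 0
        if weights[index] <= remaining:
            take = values[index] + best(remaining - weights[index], index + 1)
        return max(skip, take)

    return best(max_weight, 0)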
| 198 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
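
# --- Editor's illustration (not part of the original file): the sort key above pulls the
# first quoted identifier out of each block, so multi-line entries sort exactly like
# one-line ones. The strings below are hypothetical mapping entries.
def _demo_sort_blocks() -> None:
    blocks = ['    ("bert", "BertModel"),', '    ("albert", "AlbertModel"),']
    ordered = sorted(blocks, key=lambda s: _re_identifier.search(s).groups()[0])
    assert ordered[0].strip().startswith('("albert"')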
| 198 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
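

# --- Editor's sketch (not part of the original file): AST cuts the
# (max_length x num_mel_bins) spectrogram into patch_size x patch_size patches taken at
# the configured strides; the count per dimension is the standard unfold formula.
def _demo_num_patches(max_length: int = 1024, num_mel_bins: int = 128,
                      patch_size: int = 16, time_stride: int = 10, frequency_stride: int = 10) -> int:
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # 12 with the defaults
    time_out = (max_length - patch_size) // time_stride + 1  # 101 with the defaults
    return frequency_out * time_out  # 12 * 101 = 1212 patches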
| 654 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Params
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
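
# --- Editor's illustration (not part of the original file): YOLO boxes are normalized
# (class, x_center, y_center, width, height), so a horizontal flip only needs
# x_center -> 1 - x_center, and a vertical flip mirrors y_center the same way.
def _demo_flip_bbox_horizontal(bbox: list) -> list:
    return [bbox[0], 1 - bbox[1], bbox[2], bbox[3], bbox[4]]


assert _demo_flip_bbox_horizontal([0, 0.25, 0.5, 0.1, 0.2]) == [0, 0.75, 0.5, 0.1, 0.2]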
| 654 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : List[Any] = logging.get_logger(__name__)
UpperCamelCase_ : Any = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
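

# --- Editor's note (not part of the original file): with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) the property above returns 5 * 2**6 = 320, i.e. each output frame
# of the feature encoder covers 320 waveform samples (20 ms at 16 kHz).
def _demo_inputs_to_logits_ratio(conv_stride=(5, 2, 2, 2, 2, 2, 2)) -> int:
    import functools
    import operator

    return functools.reduce(operator.mul, conv_stride, 1)  # -> 320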
| 704 |
"""simple docstring"""
from __future__ import annotations
import math
UpperCamelCase_ : List[str] = '''2020.9.26'''
UpperCamelCase_ : List[Any] = '''xcodz-dot, cclaus, dhruvmanila'''
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float):
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x: float, y: float, z: float, axis: str, angle: float):
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")

    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)

    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")

    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
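
    # --- Editor's illustration (not part of the original file): the perspective formula
    # above shrinks x and y by distance / (z + distance), so a point twice as deep as the
    # camera distance lands at one third of its unscaled coordinates.
    x_2d, y_2d = convert_to_2d(3.0, 6.0, 20.0, 1.0, 10.0)
    print(f"{(x_2d, y_2d) = }")  # (1.0, 2.0), since 10 / (20 + 10) = 1/3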
| 482 | 0 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
        EXPECTED_BLIP_MAPPING = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
        EXPECTED_BLIP_MAPPING = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
_lowercase : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
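
# --- Editor's illustration (not part of the original file): in the graph above, removing
# vertex 2 cuts {3, 4} and {5, 6, 7, 8} off from the triangle {0, 1, 2}, removing vertex 5
# cuts off the cycle {6, 7, 8}, and 3 is an AP because 4 is a leaf. The DFS root is only
# an AP when it has more than one outgoing tree edge, as in the path graph below.
def _demo_is_cut_vertex() -> None:
    path_graph = {0: [1], 1: [0, 2], 2: [1]}  # the middle vertex of a path is always an AP
    compute_ap(path_graph)  # prints: 1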
| 49 | 0 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCamelCase__ = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
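

# --- Editor's note (not part of the original file): conceptually, _LazyAutoMapping joins
# the two name tables on their shared model-type key, so a config class can resolve to a
# model class without importing either up front. A plain-dict sketch of that join:
def _demo_join_mappings() -> dict:
    config_names = {"bert": "BertConfig"}
    model_names = {"bert": "FlaxBertModel"}
    return {config_names[k]: model_names[k] for k in model_names}  # {'BertConfig': 'FlaxBertModel'}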
| 254 |
"""simple docstring"""
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
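
    # --- Editor's illustration (not part of the original file): for a normalized vector v,
    # the Rayleigh quotient used above reduces to v^H (A v), and at the dominant eigenvector
    # it equals the dominant eigenvalue. Quick check on a diagonal matrix:
    demo_matrix = np.array([[2.0, 0.0], [0.0, 1.0]])
    demo_value, demo_vector = power_iteration(demo_matrix, np.array([1.0, 1.0]))
    assert abs(demo_value - 2.0) <= 1e-6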
| 254 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : Any = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f'resnet{"-".join(name.split("resnet"))}'
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 335 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
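
# Added illustration (not part of the original module): the `_LazyModule`
# registered above defers the heavy torch/tf/flax imports until a symbol is
# first accessed. Assumes a transformers installation that ships this model.
def _lazy_import_demo():
    from transformers.models.roberta_prelayernorm import RobertaPreLayerNormConfig

    config = RobertaPreLayerNormConfig(hidden_size=64, num_attention_heads=2, num_hidden_layers=2)
    return type(config).__name__  # the real configuration module was imported only now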
| 335 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    """Consolidate a generator and a question encoder into a single RAG checkpoint."""
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 701 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"))
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3)
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 134 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    """
    Configuration class for XLM-RoBERTa models; structurally identical to the RoBERTa configuration.
    """

    model_type = "xlm-roberta"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
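
# Added usage sketch (the task string and constructor signature follow the
# public transformers.onnx API as the author understands it; sizes are
# illustrative): read the dynamic axes declared by `inputs` above.
def _onnx_axes_demo():
    config = XLMRobertaConfig(vocab_size=512, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
    onnx_config = XLMRobertaOnnxConfig(config, task="sequence-classification")
    return onnx_config.inputs  # input_ids/attention_mask with dynamic batch and sequence axes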
| 617 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """
    Compare two equal-length binary terms. If they differ in exactly one
    position, return the merged term with that position replaced by "_";
    otherwise return False.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """
    Iterate the merging step over the list of binary terms and collect the
    prime implicants (terms left unmarked in a pass).
    """
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """
    Convert each minterm into its binary representation over the given number
    of variables.
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """
    Return True if the two terms differ in exactly `count` positions.
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """
    Pick the essential prime implicants from the coverage chart, then cover
    the remaining minterms greedily by column count.
    """
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """
    Build the coverage chart: chart[i][j] is 1 when prime implicant i covers
    binary term j.
    """
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
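
# Added worked micro-example (not in the original module): minterm 5 over 3
# variables is "101", and two terms merge only when exactly one bit differs.
def demo_compare_string() -> None:
    assert decimal_to_binary(3, [5]) == ["101"]
    assert compare_string("0010", "0110") == "0_10"  # one differing bit -> merged with "_"
    assert compare_string("0110", "1101") is False  # more than one differing bit -> no merge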
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 617 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."])
@slow
    def test_tokenizer_integration(self):
# fmt: off
a : Tuple = {"input_ids": [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a, model_name="facebook/mbart-large-50", revision="d3913889c59cd5c9e456b269c376325eabad57e2"
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = """facebook/mbart-large-50-one-to-many-mmt"""
    src_text = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 714 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
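
# Added behavior sketch: touching the dummy class fails fast with an
# informative error when any of the listed backends is missing; when all are
# installed, the real pipeline class is exported instead of this placeholder.
def _dummy_object_demo():
    try:
        SpectrogramDiffusionPipeline()
    except ImportError as err:
        return str(err)  # names the transformers/torch/note_seq requirement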
| 31 | 0 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
    """Two `Transformer2DModel` blocks whose outputs are mixed, used for dual text/image conditioning."""

    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm)
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
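
# Added numeric illustration of the mixing rule in `forward` above:
# out = e0 * mix_ratio + e1 * (1 - mix_ratio) + input, with mix_ratio = 0.5.
def _mixing_rule_demo():
    import torch

    e0, e1, x = torch.full((1,), 2.0), torch.full((1,), 4.0), torch.ones(1)
    out = e0 * 0.5 + e1 * (1 - 0.5) + x
    assert out.item() == 4.0  # 2 * 0.5 + 4 * 0.5 + 1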
| 662 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """
    Checks if a matrix equals its own conjugate transpose.
    """
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """
    Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix A and vector v.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
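
# Added illustration: for the identity matrix the Rayleigh quotient equals 1
# for any nonzero vector, since v* I v = v* v.
def identity_demo() -> None:
    v = np.array([[1.0], [2.0], [3.0]])
    assert rayleigh_quotient(np.eye(3), v) == 1.0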
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 662 | 1 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range)

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 426 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)
new_layer_name_dict = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
REMOTE_MODEL_PATHS = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"""{model_type}_small""" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]

    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with the corresponding layer name in the HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""")
    if len(missing_keys) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"""model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss""")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]

    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
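
# Added usage sketch (the output path is illustrative): converting the small
# text model downloads the suno checkpoint into CACHE_DIR on first run, checks
# the converted outputs against the original model, then saves the HF weights.
def _example_text_conversion():
    load_model("./bark_text_hf", use_small=True, model_type="text")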
| 426 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 283 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" RoBERTa tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
    tokenizer and using byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs) -> None:
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space
        # before it, so we set lstrip to True
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
def lowerCAmelCase__ ( self : Dict , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[int] ) -> BatchEncoding:
"""simple docstring"""
snake_case_ = kwargs.get("is_split_into_words" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self : Optional[Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : Any ) -> BatchEncoding:
"""simple docstring"""
snake_case_ = kwargs.get("is_split_into_words" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
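# Illustrative sketch (not part of the original file): the two methods above
# encode the RoBERTa-style special-token layout. With hypothetical ids bos=0
# and eos=2, the single-sequence and pair cases look like this.
def _special_token_layout_sketch(token_ids_0, token_ids_1=None, bos_id=0, eos_id=2):
    output = [bos_id] + token_ids_0 + [eos_id]
    if token_ids_1 is None:
        return output
    return output + [eos_id] + token_ids_1 + [eos_id]

assert _special_token_layout_sketch([10, 11], [20]) == [0, 10, 11, 2, 2, 20, 2]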
| 283 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer( model ):
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer( nn.Module ):
    def __init__( self , module: nn.Module , rank: int ):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
    def forward( self , input , *args , **kwargs ):
        return self.module(input , *args , **kwargs ) + self.adapter(input )
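# Small usage sketch (illustrative, not part of the original tests): wrap a
# linear layer with the adapter above; the base weight is frozen so only the
# low-rank path receives gradients. `rank=8` is an arbitrary value for the sketch.
def _lora_usage_sketch():
    base = nn.Linear(64, 64)
    base.weight.requires_grad_(False)
    lora = LoRALayer(base, rank=8)
    return lora(torch.randn(2, 64))  # frozen base output + trainable correction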
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
A__ : Any ="""bigscience/bloom-1b7"""
# Constant values
A__ : Union[str, Any] =2.109_6595_5269_2574
A__ : List[str] ="""Hello my name is"""
A__ : List[str] =set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
A__ : Optional[Any] =1_0
def A_ ( self : str ):
# Models and tokenizer
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(self.model_name )
class lowercase__ ( _UpperCAmelCase ):
def A_ ( self : int ):
super().setUp()
# Models and tokenizer
SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
def A_ ( self : int ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ = self.model_abit.config
self.assertTrue(hasattr(UpperCAmelCase_ , 'quantization_config' ) )
SCREAMING_SNAKE_CASE__ = config.to_dict()
SCREAMING_SNAKE_CASE__ = config.to_diff_dict()
SCREAMING_SNAKE_CASE__ = config.to_json_string()
def A_ ( self : Union[str, Any] ):
from bitsandbytes.nn import Paramsabit
SCREAMING_SNAKE_CASE__ = self.model_fpaa.get_memory_footprint()
SCREAMING_SNAKE_CASE__ = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
SCREAMING_SNAKE_CASE__ = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def A_ ( self : List[Any] ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(UpperCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def A_ ( self : int ):
SCREAMING_SNAKE_CASE__ = BitsAndBytesConfig()
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=UpperCAmelCase_ , device_map='auto' )
SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def A_ ( self : str ):
with self.assertRaises(UpperCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(UpperCAmelCase_ )
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = BitsAndBytesConfig()
with self.assertRaises(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=UpperCAmelCase_ , load_in_abit=UpperCAmelCase_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def A_ ( self : str ):
with self.assertRaises(UpperCAmelCase_ ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(UpperCAmelCase_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(UpperCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(UpperCAmelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(UpperCAmelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ = self.model_fpaa.to(torch.floataa )
SCREAMING_SNAKE_CASE__ = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
SCREAMING_SNAKE_CASE__ = self.model_fpaa.to('cpu' )
# Check this does not throw an error
SCREAMING_SNAKE_CASE__ = self.model_fpaa.half()
# Check this does not throw an error
SCREAMING_SNAKE_CASE__ = self.model_fpaa.float()
def A_ ( self : int ):
SCREAMING_SNAKE_CASE__ = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=UpperCAmelCase_ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
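# A minimal sketch of the bitsandbytes 4-bit loading pattern exercised by the
# test above (illustrative only; the quant type and model name are assumptions,
# not values taken from this test):
#
#   quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#   model = AutoModelForSeqaSeqLM.from_pretrained(
#       "t5-small", quantization_config=quant_config, device_map="auto"
#   )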
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
@classmethod
def A_ ( cls : Dict ):
SCREAMING_SNAKE_CASE__ = 't5-small'
SCREAMING_SNAKE_CASE__ = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(cls.model_name )
SCREAMING_SNAKE_CASE__ = 'Translate in German: Hello, my dog is cute'
def A_ ( self : List[Any] ):
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : int ):
from transformers import TaForConditionalGeneration
SCREAMING_SNAKE_CASE__ = TaForConditionalGeneration._keep_in_fpaa_modules
SCREAMING_SNAKE_CASE__ = None
# test with `t5-small`
SCREAMING_SNAKE_CASE__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase_ )
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE__ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = modules
def A_ ( self : Union[str, Any] ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
SCREAMING_SNAKE_CASE__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase_ )
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE__ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase_ )
class lowercase__ ( _UpperCAmelCase ):
def A_ ( self : int ):
super().setUp()
# model_name
SCREAMING_SNAKE_CASE__ = 'bigscience/bloom-560m'
SCREAMING_SNAKE_CASE__ = 't5-small'
# Different types of model
SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
# Sequence classification model
SCREAMING_SNAKE_CASE__ = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
# CausalLM model
SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
# Seq2seq model
SCREAMING_SNAKE_CASE__ = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
def A_ ( self : List[str] ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : int ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowercase__ ( _UpperCAmelCase ):
def A_ ( self : Optional[int] ):
super().setUp()
def A_ ( self : Union[str, Any] ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
SCREAMING_SNAKE_CASE__ = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowercase__ ( _UpperCAmelCase ):
def A_ ( self : Any ):
super().setUp()
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=UpperCAmelCase_ , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
SCREAMING_SNAKE_CASE__ = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class lowercase__ ( _UpperCAmelCase ):
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ = 'facebook/opt-350m'
super().setUp()
def A_ ( self : Tuple ):
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj = LoRALayer(module.q_proj , rank=16 )
                module.k_proj = LoRALayer(module.k_proj , rank=16 )
                module.v_proj = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
        batch = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module , LoRALayer ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class lowercase__ ( _UpperCAmelCase ):
A__ : List[Any] ="""gpt2-xl"""
A__ : Optional[Any] =3.3191_8548_5415_2187
| 400 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def A_ ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = '[PAD]'
SCREAMING_SNAKE_CASE__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def A_ ( self : Any ):
SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '[PAD]' )
self.assertEqual(vocab_keys[1] , '[CLS]' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(UpperCAmelCase_ ) , 1012 )
def A_ ( self : List[str] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def A_ ( self : Dict ):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
@cached_property
    def big_tokenizer( self ):
        return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def A_ ( self : List[str] ):
        text = 'Hello World!'
        expected_ids = [35389, 6672, 49, 2]
        self.assertListEqual(expected_ids , self.big_tokenizer.encode(text ) )
@slow
def A_ ( self : Union[str, Any] ):
# fmt: off
SCREAMING_SNAKE_CASE__ = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
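# Illustrative sketch (not from the test file): the id assertions above shift
# raw SentencePiece piece ids by the tokenizer's `fairseq_offset` so its own
# special tokens can occupy the low ids. A toy version of that remapping:
def _remap_sp_ids_sketch(sp_ids, fairseq_offset):
    return [i + fairseq_offset for i in sp_ids]

assert _remap_sp_ids_sketch([285, 46, 10], fairseq_offset=1) == [286, 47, 11]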
| 400 | 1 |
def bin_to_octal(bin_string: str ) -> str:
    """simple docstring"""
    if not all(char in '''01''' for char in bin_string ):
        raise ValueError('''Non-binary value was passed to the function''' )
    if not bin_string:
        raise ValueError('''Empty string was passed to the function''' )
    oct_string = ''''''
    while len(bin_string ) % 3 != 0:
        bin_string = '''0''' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
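    # Cross-check (illustrative addition, not part of the original module)
    # against Python's built-in base conversion:
    assert bin_to_octal("11001010") == format(int("11001010", 2), "o")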
| 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}
MAX_MODEL_INPUT_SIZES = {
    'facebook/s2t-small-librispeech-asr': 1_024,
}
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES = {'mustc': MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file , spm_file , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , do_upper_case=False , do_lower_case=False , tgt_lang=None , lang_codes=None , sp_model_kwargs=None , **kwargs , ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [F'''<lang:{lang}>''' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.encoder )
    @property
    def tgt_lang( self ) -> str:
        """simple docstring"""
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang( self , new_tgt_lang ) -> None:
        """simple docstring"""
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )
    def set_tgt_lang_special_tokens( self , tgt_lang ) -> None:
        """simple docstring"""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize( self , text ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def get_vocab( self ) -> Dict:
        """simple docstring"""
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
        vocab_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
        )
        spm_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm(path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json(path: str ) -> Union[Dict, List]:
    '''simple docstring'''
    with open(path , """r""" ) as f:
        return json.load(f )
def save_json(data , path: str ) -> None:
    '''simple docstring'''
    with open(path , """w""" ) as f:
        json.dump(data , f , indent=2 )
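# Illustrative sketch (not in the original module): encode() output is prefixed
# with a language-code token and closed with </s>, so a decoder can be primed
# for the target language. With hypothetical ids lang=5 and eos=2:
def _lang_prefix_sketch(token_ids, lang_id=5, eos_id=2):
    return [lang_id] + token_ids + [eos_id]

assert _lang_prefix_sketch([7, 8]) == [5, 7, 8, 2]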
| 566 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()
    logger.info(f'''Loading data from {args.data_file}''')
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
    logger.info(f'''Dump to {args.token_counts_dump}''')
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
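    # Illustrative follow-up (not in the original script): turn the raw counts
    # into smoothed sampling probabilities, XLM-style. The 0.7 exponent is an
    # assumption for this sketch, not a value taken from this repository.
    smoothed = [c**0.7 for c in counts]
    total = sum(smoothed)
    probs = [s / total for s in smoothed]
    logger.info(f"Computed smoothed probabilities for {len(probs)} tokens.")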
| 121 |
"""simple docstring"""
def perfect(number: int ) -> bool:
    """simple docstring"""
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
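    # Quick sanity check (illustrative addition): the perfect numbers below 1000.
    assert [n for n in range(1, 1_000) if perfect(n)] == [6, 28, 496]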
| 121 | 1 |
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
UpperCAmelCase = datasets.logging.get_logger(__name__)
UpperCAmelCase = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
UpperCAmelCase = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
UpperCAmelCase = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
UpperCAmelCase = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
def __snake_case ( self , A_ ) -> int:
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"""Using default BLEURT-Base checkpoint for sequence maximum length 128. """
"""You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').""" )
lowerCAmelCase = """bleurt-base-128"""
if self.config_name.lower() in CHECKPOINT_URLS:
lowerCAmelCase = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
lowerCAmelCase = self.config_name.upper()
else:
raise KeyError(
f'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' )
# download the model checkpoint specified by self.config_name and set up the scorer
lowerCAmelCase = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
lowerCAmelCase = score.BleurtScorer(os.path.join(A_ , A_ ) )
def __snake_case ( self , A_ , A_ ) -> Tuple:
lowerCAmelCase = self.scorer.score(references=A_ , candidates=A_ )
return {"scores": scores} | 433 | '''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowercase( metaclass=DummyObject ):
"""simple docstring"""
__lowerCamelCase = ['''onnx''']
def __init__( self: Any ,*a: List[str] ,**a: str ):
requires_backends(self ,['onnx'] )
@classmethod
def snake_case ( cls: Optional[Any] ,*a: List[str] ,**a: Any ):
requires_backends(cls ,['onnx'] )
@classmethod
def snake_case ( cls: str ,*a: Dict ,**a: str ):
requires_backends(cls ,['onnx'] )
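# Sketch of what the dummy pattern above provides (illustrative): constructing
# the placeholder raises an informative ImportError naming the missing `onnx`
# backend, instead of the whole package failing at import time.
#
#   try:
#       _lowercase()
#   except ImportError as err:
#       print(err)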
| 396 | 0 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (7_2_0, 1_2_8_0)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 1_0_0
LABEL_DIR = ''''''
IMG_DIR = ''''''
OUTPUT_DIR = ''''''
NUMBER_IMAGES = 2_5_0
def main() -> None:
    '''simple docstring'''
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE, )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(""".""", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", """w""") as outfile:
            outfile.write("""\n""".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    '''simple docstring'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, """*.txt""")):
        label_name = label_file.split(os.sep)[-1].rsplit(""".""", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("""\n""").split(""" """)
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list: list, all_annos: list, idxs: list[int], output_size: tuple[int, int], scale_range: tuple[float, float], filter_scale: float = 0.0, ) -> tuple[list, list, str]:
    '''simple docstring'''
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    '''simple docstring'''
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
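# Illustrative check (not in the original script) of the corner -> YOLO
# conversion performed inside main(): relative (xmin, ymin, xmax, ymax)
# maps to (x_center, y_center, width, height).
def _corners_to_yolo_sketch(xmin, ymin, xmax, ymax):
    width, height = xmax - xmin, ymax - ymin
    return (xmin + width / 2, ymin + height / 2, width, height)

assert _corners_to_yolo_sketch(0.25, 0.25, 0.75, 0.75) == (0.5, 0.5, 0.5, 0.5)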
| 605 |
'''simple docstring'''
import operator as op
def solve(post_fix ):
    '''simple docstring'''
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        """^""": op.pow,
        """*""": op.mul,
        """/""": div,
        """+""": op.add,
        """-""": op.sub,
    }  # operators & their respective operation
    # print table header
    print("""Symbol""".center(8 ) , """Action""".center(12 ) , """Stack""" , sep=""" | """ )
    print("""-""" * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x in digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ )
            a = stack.pop()  # pop stack
            # output in tabular format
            print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ("""push(""" + a + x + b + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ , )
    return int(stack[0] )
if __name__ == "__main__":
    Postfix = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
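    # Worked example (illustrative): the postfix string "5 6 9 * +" evaluates
    # to 5 + (6 * 9) = 59; solve("5 6 9 * +".split(" ")) prints the trace
    # table and returns 59.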
| 605 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class Swinv2Config(PretrainedConfig ):
    model_type = '''swinv2'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 8 |
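# Quick check (illustrative, not part of the original file) of the hidden-size
# rule at the end of the config above: with embed_dim=96 and four stages, the
# channel dimension after the last stage is 96 * 2**3.
#
#   assert Swinv2Config().hidden_size == 768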
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase__ : Any = '''hf-internal-testing/tiny-random-bert'''
lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
            self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt'))
| 8 | 1 |
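# Layout sketch (illustrative) of the cache tree the first test above walks:
#
#   models--hf-internal-testing--tiny-random-bert/
#       blobs/              # content-addressed file bodies
#       refs/main           # text file holding the current commit hash
#       snapshots/<commit>/ # per-revision views linking into blobs/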
import string
import numpy
def greatest_common_divisor(a: int , b: int ) -> int:
    return b if a == 0 else greatest_common_divisor(b % a , a )
class HillCipher:
    """simple docstring"""
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x : x % 36 )
    to_int = numpy.vectorize(round )
    def __init__( self , encrypt_key ) -> None:
        """simple docstring"""
        self.encrypt_key = self.modulus(encrypt_key )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self.key_string.index(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self.key_string[round(UpperCAmelCase )]
def A__ ( self ) -> None:
'''simple docstring'''
lowercase_ = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowercase_ = det % len(self.key_string )
lowercase_ = len(self.key_string )
if greatest_common_divisor(UpperCAmelCase , len(self.key_string ) ) != 1:
lowercase_ = (
F'determinant modular {req_l} of encryption key({det}) '
F'is not co prime w.r.t {req_l}.\nTry another key.'
)
raise ValueError(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = [char for char in text.upper() if char in self.key_string]
lowercase_ = chars[-1]
while len(UpperCAmelCase ) % self.break_key != 0:
chars.append(UpperCAmelCase )
return "".join(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = self.process_text(text.upper() )
lowercase_ = ""
for i in range(0 , len(UpperCAmelCase ) - self.break_key + 1 , self.break_key ):
lowercase_ = text[i : i + self.break_key]
lowercase_ = [self.replace_letters(UpperCAmelCase ) for char in batch]
lowercase_ = numpy.array([vec] ).T
lowercase_ = self.modulus(self.encrypt_key.dot(UpperCAmelCase ) ).T.tolist()[
0
]
lowercase_ = "".join(
self.replace_digits(UpperCAmelCase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def A__ ( self ) -> numpy.ndarray:
'''simple docstring'''
lowercase_ = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowercase_ = det % len(self.key_string )
lowercase_ = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
lowercase_ = i
break
lowercase_ = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCAmelCase ) )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = self.make_decrypt_key()
lowercase_ = self.process_text(text.upper() )
lowercase_ = ""
for i in range(0 , len(UpperCAmelCase ) - self.break_key + 1 , self.break_key ):
lowercase_ = text[i : i + self.break_key]
lowercase_ = [self.replace_letters(UpperCAmelCase ) for char in batch]
lowercase_ = numpy.array([vec] ).T
lowercase_ = self.modulus(decrypt_key.dot(UpperCAmelCase ) ).T.tolist()[0]
lowercase_ = "".join(
self.replace_digits(UpperCAmelCase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def main() -> None:
    '''simple docstring'''
    n = int(input("Enter the order of the encryption key: " ) )
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers" )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )
    hc = HillCipher(numpy.array(hill_matrix ) )
    print("Would you like to encrypt or decrypt some text? (1 or 2)" )
    option = input("\n1. Encrypt\n2. Decrypt\n" )
    if option == "1":
        text_e = input("What text would you like to encrypt?: " )
        print("Your encrypted text is:" )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input("What text would you like to decrypt?: " )
        print("Your decrypted text is:" )
        print(hc.decrypt(text_d ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
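    # Worked example (illustrative; values recalled from the upstream doctests,
    # so treat them as an assumption rather than verified output):
    #   hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
    #   hc.encrypt("testing hill cipher")  -> "WHXYJOLM9C6XT085LL"
    #   hc.decrypt("WHXYJOLM9C6XT085LL")   -> "TESTINGHILLCIPHERR"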
| 711 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
SCREAMING_SNAKE_CASE__ = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={"help": "Whether to log verbose messages or not."} , )
lowerCAmelCase__ = field(
default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
lowerCAmelCase__ = field(
default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
lowerCAmelCase__ = field(
default=0.999_995 , metadata={"help": "Decay of gumbel temperature during training."} )
def configure_logger(model_args: ModelArguments , training_args: TrainingArguments ):
    '''simple docstring'''
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = field(
default=snake_case_ , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCAmelCase__ = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
lowerCAmelCase__ = field(
default="validation" , metadata={
"help": (
"The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
lowerCAmelCase__ = field(
default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
lowerCAmelCase__ = field(
default=1 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCAmelCase__ = field(
default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = "longest"
lowerCAmelCase__ = None
lowerCAmelCase__ = None
def __call__( self , UpperCAmelCase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
lowercase_ = self.feature_extractor.pad(
UpperCAmelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
lowercase_ = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
lowercase_ = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
lowercase_ = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
torch.long )
lowercase_ = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
lowercase_ = 1
lowercase_ = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
lowercase_ = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=UpperCAmelCase , min_masks=2 , )
return batch
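# Toy check (illustrative, not in the original script) of the flip/cumsum/flip
# trick used in the collator above: marking the last real frame at index 2
# yields attention over positions 0..2 and nothing after.
_mask_sketch = torch.tensor([[0, 0, 1, 0]])
assert _mask_sketch.flip([-1]).cumsum(-1).flip([-1]).bool().tolist() == [[True, True, True, False]]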
class WavaVecaPreTrainer(Trainer):
    """Subclass of `Trainer` that decays the gumbel softmax temperature after every update step."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self, model, inputs) -> torch.Tensor:
        """Perform a training step on a batch of inputs and decay the gumbel temperature."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
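# A quick sanity check of the schedule used above (illustrative only; the max/min/decay
# values below are assumptions, not taken from this script). The temperature follows
# max(max_gumbel_temp * gumbel_temp_decay ** num_update_step, min_gumbel_temp):
#
#     max_temp, min_temp, decay = 2.0, 0.5, 0.999995
#     for step in (0, 100_000, 500_000):
#         print(step, max(max_temp * decay**step, min_temp))
#     # 0 -> 2.0, 100000 -> ~1.21, 500000 -> 0.5 (floored at min_temp)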
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name,
            split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``"
        )

    model = WavaVecaForPreTraining(config)

    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)

    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
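# Illustrative invocation (the dataset and model names below are placeholders, not part
# of this script):
#
#   python run_pretrain.py \
#       --model_name_or_path="patrickvonplaten/wav2vec2-base-v2" \
#       --dataset_name="librispeech_asr" \
#       --dataset_config_name="clean" \
#       --output_dir="./wav2vec2-pretrained" \
#       --do_train \
#       --max_duration_in_seconds=20.0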
if __name__ == "__main__":
main()
| 601 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
# Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
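# Illustrative invocation (the model name and hyper-parameters below are placeholders):
#
#   python run_xnli.py \
#       --model_name_or_path=bert-base-multilingual-cased \
#       --language=de \
#       --train_language=en \
#       --do_train --do_eval \
#       --max_seq_length=128 \
#       --output_dir=/tmp/debug_xnli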
if __name__ == "__main__":
main()
| 2 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned"
                " instead. Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
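# Minimal sketch of the thresholding performed above (illustrative only, using made-up
# scores rather than real `p_head`/`w_head` outputs):
#
#   scores = torch.tensor([[0.3], [0.7]])          # one scalar score per image
#   flagged = (scores.flatten() > 0.5).tolist()    # -> [False, True]
#   # every flagged image is replaced by an all-black array of the same shape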
| 247 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipaProcessor(ProcessorMixin):
    """
    Wraps a BLIP image processor and a tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # every text-related kwarg is forwarded to the tokenizer unchanged
        text_kwargs = dict(
            add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length,
            stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
            return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, **text_kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, **text_kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
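# Minimal usage sketch for the processor above (illustrative; the checkpoint name is an
# assumption, not part of this file):
#
#   from PIL import Image
#   processor = BlipaProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
#   # inputs now holds "pixel_values" plus the tokenizer fields ("input_ids", "attention_mask")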
| 718 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 46 | 0 |
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # prune when the current path already overshoots max_sum, or when even taking
    # every remaining (unchosen) number could not reach max_sum
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
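# For the inputs above the backtracking prints exactly the subsets of
# [3, 34, 4, 12, 5, 2] that sum to 9, enumerated with strictly increasing indices:
#   [3, 4, 2] [4, 5]
# Worst-case runtime is O(2^n), since every element can either be taken or skipped.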
| 222 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Whisper feature extractor that extracts log-mel filter bank features from raw speech.
    """

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
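# Minimal usage sketch (illustrative): 30 s of audio at 16 kHz produces the fixed Whisper
# input shape of 80 mel bins x 3000 frames (480_000 samples / 160 hop length):
#
#   extractor = WhisperFeatureExtractor()
#   features = extractor(np.zeros(480_000, dtype=np.float32), sampling_rate=16_000, return_tensors="np")
#   features["input_features"].shape  # -> (1, 80, 3000)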
| 222 | 1 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPTaTokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPTaTokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
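# Minimal usage sketch (illustrative; "gpt2" is the standard checkpoint name, assumed here):
#
#   tf_tokenizer = TFGPTaTokenizer.from_pretrained("gpt2")
#   outputs = tf_tokenizer(tf.constant(["hello world"]))
#   # outputs["input_ids"] is ragged unless `max_length`/`pad_token_id` were configured,
#   # in which case `pad_model_inputs` pads it to a dense tensor.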
| 358 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 358 | 1 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """Bottom-up dynamic programming solution for matching `input_string` against `pattern`."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
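# Worked examples for the DP above (dp[i][j] == 1 iff the first i characters of
# input_string match the first j characters of pattern):
#   match_pattern("aab", "c*a*b")  -> True   ("c*" matches empty, "a*" matches "aa")
#   match_pattern("aaa", "a*")     -> True
#   match_pattern("ab",  ".*")     -> True
#   match_pattern("abc", "a.d")    -> False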
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_A: Any = """aab"""
_A: Any = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 126 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Gives the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))
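# e.g. euclidean_distance([0, 0], [3, 4]) == 5.0 (the familiar 3-4-5 right triangle)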
def classifier(train_data, train_target, classes, point, k=5):
    """Classifies the point using the KNN algorithm over the `k` nearest neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 126 | 1 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 332 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; multiple copies of the same prompt are sent sequentially."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class UpperCAmelCase__ ( __snake_case ):
    def __init__( self ,start_length ,eof_strings ,tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__( self ,input_ids ,scores ,**kwargs ):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def a__ (__lowercase :Optional[Any] ) -> List[Any]:
    string_list = re.split('''(%s)''' % '''|'''.join(_UpperCamelCase ) , __lowercase )
    # last string should be ""
    return "".join(string_list[:-2] )
def a__ (__lowercase :List[str] , __lowercase :Dict , __lowercase :List[str] , __lowercase :Optional[int] , __lowercase :List[Any] , __lowercase :str=20 , **__lowercase :List[str] ) -> Optional[Any]:
_A : Any = defaultdict(__lowercase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__lowercase ) ):
with torch.no_grad():
_A : int = batch['''ids'''].shape[-1]
_A : Any = accelerator.unwrap_model(__lowercase ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__lowercase , **__lowercase )
# each task is generated batch_size times
_A : Union[str, Any] = batch['''task_id'''].repeat(__lowercase )
_A : int = accelerator.pad_across_processes(
__lowercase , dim=1 , pad_index=tokenizer.pad_token_id )
_A , _A : List[str] = accelerator.gather((generated_tokens, generated_tasks) )
_A : List[str] = generated_tokens.cpu().numpy()
_A : List[str] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__lowercase , __lowercase ):
gen_token_dict[task].append(__lowercase )
_A : Tuple = [[] for _ in range(__lowercase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_A : Dict = tokenizer.decode(__lowercase , skip_special_tokens=__lowercase , clean_up_tokenization_spaces=__lowercase )
code_gens[task].append(remove_last_block(__lowercase ) )
return code_gens
def a__ () -> Dict:
# Setup configuration
_A : Any = HfArgumentParser(__lowercase )
_A : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_A : Tuple = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_A : List[Any] = '''false'''
if args.num_workers is None:
_A : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_A : List[str] = Accelerator()
set_seed(args.seed , device_specific=__lowercase )
# Load model and tokenizer
_A : Optional[Any] = AutoTokenizer.from_pretrained(args.model_ckpt )
_A : int = tokenizer.eos_token
_A : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_A : int = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __lowercase , __lowercase )] ),
}
# Load evaluation dataset and metric
_A : str = load_dataset('''openai_humaneval''' )
_A : List[Any] = load_metric('''code_eval''' )
_A : Optional[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
_A : Tuple = args.n_samples // args.batch_size
_A : List[Any] = TokenizedDataset(__lowercase , human_eval['''test'''] , n_copies=__lowercase , n_tasks=__lowercase )
# do not confuse args.batch_size, which is actually the num_return_sequences
_A : Tuple = DataLoader(__lowercase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_A : List[str] = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
_A , _A : Optional[Any] = accelerator.prepare(__lowercase , __lowercase )
_A : Tuple = complete_code(
__lowercase , __lowercase , __lowercase , __lowercase , n_tasks=__lowercase , batch_size=args.batch_size , **__lowercase , )
if accelerator.is_main_process:
_A : int = []
for task in tqdm(range(__lowercase ) ):
_A : List[str] = human_eval['''test'''][task]['''test''']
_A : int = f"""check({human_eval['test'][task]['entry_point']})"""
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
_A , _A : Union[str, Any] = code_eval_metric.compute(
references=__lowercase , predictions=__lowercase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__lowercase , __lowercase )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
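# Example invocation (illustrative; the flag names follow the fields of
# HumanEvalArguments and may differ in your setup):
#   accelerate launch human_eval.py --model_ckpt codeparrot/codeparrot-small \
#       --do_sample True --n_samples 20 --batch_size 10 --output_file eval_results.json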
| 332 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_snake_case = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_snake_case = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_snake_case = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_snake_case = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
_snake_case = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
_snake_case = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
_snake_case = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
_snake_case = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
_snake_case = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : str =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : List[Any] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Tuple =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_snake_case = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
_snake_case = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
_snake_case = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) into a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
      is provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(__magic_name__ )
class _lowerCAmelCase :
"""simple docstring"""
def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Union[bool, str] = False , SCREAMING_SNAKE_CASE__ : Union[bool, str] = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
elif titles is None or texts is None:
UpperCamelCase = titles if texts is None else texts
return super().__call__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
UpperCamelCase = titles if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [titles]
UpperCamelCase = texts if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [texts]
UpperCamelCase = len(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = questions if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [questions] * n_passages
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
                F'There should be as many titles as texts but got {len(SCREAMING_SNAKE_CASE__ )} titles and {len(SCREAMING_SNAKE_CASE__ )} texts.' )
UpperCamelCase = super().__call__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )['input_ids']
UpperCamelCase = super().__call__(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )['input_ids']
UpperCamelCase = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
}
if return_attention_mask is not False:
UpperCamelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCamelCase = attention_mask
return self.pad(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
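    # Rank the retrieved passages by relevance logit, then extract the best
    # answer spans inside each passage text (question and title tokens are skipped).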
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : BatchEncoding , SCREAMING_SNAKE_CASE__ : DPRReaderOutput , SCREAMING_SNAKE_CASE__ : int = 16 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : int = 4 , ):
"""simple docstring"""
UpperCamelCase = reader_input['input_ids']
UpperCamelCase , UpperCamelCase , UpperCamelCase = reader_output[:3]
UpperCamelCase = len(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = sorted(range(SCREAMING_SNAKE_CASE__ ) , reverse=SCREAMING_SNAKE_CASE__ , key=relevance_logits.__getitem__ )
UpperCamelCase = []
for doc_id in sorted_docs:
UpperCamelCase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCamelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCamelCase = sequence_ids.index(self.pad_token_id )
else:
UpperCamelCase = len(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE__ , top_spans=SCREAMING_SNAKE_CASE__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE__ , start_index=SCREAMING_SNAKE_CASE__ , end_index=SCREAMING_SNAKE_CASE__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(SCREAMING_SNAKE_CASE__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , ):
"""simple docstring"""
UpperCamelCase = []
for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x[1] , reverse=SCREAMING_SNAKE_CASE__ )
UpperCamelCase = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'Wrong span indices: [{start_index}:{end_index}]' )
UpperCamelCase = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'Span is too long: {length} > {max_answer_length}' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(SCREAMING_SNAKE_CASE__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__magic_name__ )
class _lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Optional[int] =READER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Any =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[Any] =READER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ : str =["input_ids", "attention_mask"]
| 282 |
def __lowerCamelCase ( _lowercase ) -> str:
return "".join(chr(ord(_lowercase ) - 32 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 282 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCAmelCase : Dict = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase_ )
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , '''vision''' )
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None ):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images : Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        return super().__call__(images , **kwargs )
    def preprocess( self , image ):
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F'Unsupported framework: {self.framework}' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 717 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__lowerCAmelCase : Tuple = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__lowerCAmelCase : Dict = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__lowerCAmelCase : Optional[int] = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def UpperCAmelCase_ ( __lowerCAmelCase ) -> str:
    def remove_articles(text ):
        regex = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
        return re.sub(regex , ''' ''' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
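# e.g. normalize_answer("The  Cat!") -> "cat" (lowercased, punctuation and articles removed, whitespace collapsed)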
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) )
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
__lowercase : Union[str, Any] = [any(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for ref in refs ) for pred, refs in zip(__lowerCAmelCase , __lowerCAmelCase )]
return (sum(__lowerCAmelCase ) / len(__lowerCAmelCase )) * 100
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
__lowercase : Union[str, Any] = [rgram for rgrams in rgramslist for rgram in rgrams]
__lowercase : Dict = Counter(__lowerCAmelCase )
__lowercase : Union[str, Any] = Counter(__lowerCAmelCase )
__lowercase : str = Counter()
for sgram, scount in sgramcounter.items():
__lowercase : List[Any] = scount * numref
__lowercase : Optional[Any] = Counter(__lowerCAmelCase )
__lowercase : Union[str, Any] = Counter()
for cgram, ccount in cgramcounter.items():
__lowercase : Union[str, Any] = ccount * numref
# KEEP
__lowercase : Optional[Any] = sgramcounter_rep & cgramcounter_rep
__lowercase : Dict = keepgramcounter_rep & rgramcounter
__lowercase : str = sgramcounter_rep & rgramcounter
__lowercase : Tuple = 0
__lowercase : int = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__lowercase : Any = 1
__lowercase : Union[str, Any] = 1
if len(__lowerCAmelCase ) > 0:
__lowercase : List[str] = keeptmpscorea / len(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
__lowercase : Dict = keeptmpscorea / sum(keepgramcounterall_rep.values() )
__lowercase : Dict = 0
if keepscore_precision > 0 or keepscore_recall > 0:
__lowercase : str = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
__lowercase : Tuple = sgramcounter_rep - cgramcounter_rep
__lowercase : str = delgramcounter_rep - rgramcounter
__lowercase : int = sgramcounter_rep - rgramcounter
__lowercase : Dict = 0
__lowercase : List[str] = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__lowercase : Union[str, Any] = 1
if len(__lowerCAmelCase ) > 0:
__lowercase : Union[str, Any] = deltmpscorea / len(__lowerCAmelCase )
# ADDITION
__lowercase : int = set(__lowerCAmelCase ) - set(__lowerCAmelCase )
__lowercase : Tuple = set(__lowerCAmelCase ) & set(__lowerCAmelCase )
__lowercase : Any = set(__lowerCAmelCase ) - set(__lowerCAmelCase )
__lowercase : Tuple = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__lowercase : int = 1
__lowercase : Any = 1
if len(__lowerCAmelCase ) > 0:
__lowercase : Any = addtmpscore / len(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
__lowercase : str = addtmpscore / len(__lowerCAmelCase )
__lowercase : int = 0
if addscore_precision > 0 or addscore_recall > 0:
__lowercase : Any = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
__lowercase : Tuple = len(__lowerCAmelCase )
__lowercase : str = ssent.split(''' ''' )
__lowercase : List[str] = csent.split(''' ''' )
__lowercase : Tuple = []
__lowercase : Any = []
__lowercase : Optional[Any] = []
__lowercase : Optional[int] = []
__lowercase : Dict = []
__lowercase : Optional[int] = []
__lowercase : Dict = []
__lowercase : str = []
__lowercase : Dict = []
__lowercase : List[Any] = []
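    # Build the 1- to 4-gram lists for the references (ragramslist), the source
    # sentence (s*grams) and the candidate sentence (c*grams) used below.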
for rsent in rsents:
__lowercase : Any = rsent.split(''' ''' )
__lowercase : Optional[int] = []
__lowercase : List[Any] = []
__lowercase : Tuple = []
ragramslist.append(__lowerCAmelCase )
for i in range(0 , len(__lowerCAmelCase ) - 1 ):
if i < len(__lowerCAmelCase ) - 1:
__lowercase : List[str] = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(__lowerCAmelCase )
if i < len(__lowerCAmelCase ) - 2:
__lowercase : Union[str, Any] = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(__lowerCAmelCase )
if i < len(__lowerCAmelCase ) - 3:
__lowercase : int = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(__lowerCAmelCase )
ragramslist.append(__lowerCAmelCase )
ragramslist.append(__lowerCAmelCase )
ragramslist.append(__lowerCAmelCase )
for i in range(0 , len(__lowerCAmelCase ) - 1 ):
if i < len(__lowerCAmelCase ) - 1:
__lowercase : Dict = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(__lowerCAmelCase )
if i < len(__lowerCAmelCase ) - 2:
__lowercase : int = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(__lowerCAmelCase )
if i < len(__lowerCAmelCase ) - 3:
__lowercase : str = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(__lowerCAmelCase )
for i in range(0 , len(__lowerCAmelCase ) - 1 ):
if i < len(__lowerCAmelCase ) - 1:
__lowercase : int = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(__lowerCAmelCase )
if i < len(__lowerCAmelCase ) - 2:
__lowercase : Any = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(__lowerCAmelCase )
if i < len(__lowerCAmelCase ) - 3:
__lowercase : Optional[int] = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(__lowerCAmelCase )
((__lowercase) , (__lowercase) , (__lowercase)) : List[str] = SARIngram(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
((__lowercase) , (__lowercase) , (__lowercase)) : Optional[int] = SARIngram(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
((__lowercase) , (__lowercase) , (__lowercase)) : Optional[Any] = SARIngram(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
((__lowercase) , (__lowercase) , (__lowercase)) : Any = SARIngram(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowercase : List[Any] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
__lowercase : Any = sum([delascore, delascore, delascore, delascore] ) / 4
__lowercase : int = sum([addascore, addascore, addascore, addascore] ) / 4
__lowercase : str = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = "13a" , __lowerCAmelCase = True ) -> List[str]:
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
__lowercase : List[Any] = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
__lowercase : Union[str, Any] = sacrebleu.metrics.bleu._get_tokenizer(__lowerCAmelCase )()(__lowerCAmelCase )
else:
__lowercase : str = sacrebleu.TOKENIZERS[tokenizer]()(__lowerCAmelCase )
elif tokenizer == "moses":
__lowercase : Tuple = sacremoses.MosesTokenizer().tokenize(__lowerCAmelCase , return_str=__lowerCAmelCase , escape=__lowerCAmelCase )
elif tokenizer == "penn":
__lowercase : str = sacremoses.MosesTokenizer().penn_tokenize(__lowerCAmelCase , return_str=__lowerCAmelCase )
else:
__lowercase : int = sentence
if not return_str:
__lowercase : List[Any] = normalized_sent.split()
return normalized_sent
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
if not (len(__lowerCAmelCase ) == len(__lowerCAmelCase ) == len(__lowerCAmelCase )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
__lowercase : str = 0
for src, pred, refs in zip(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
sari_score += SARIsent(normalize(__lowerCAmelCase ) , normalize(__lowerCAmelCase ) , [normalize(__lowerCAmelCase ) for sent in refs] )
__lowercase : int = sari_score / len(__lowerCAmelCase )
return 100 * sari_score
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="exp" , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , ) -> str:
__lowercase : List[Any] = len(references[0] )
if any(len(__lowerCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
__lowercase : int = [[refs[i] for refs in references] for i in range(__lowerCAmelCase )]
__lowercase : List[str] = sacrebleu.corpus_bleu(
__lowerCAmelCase , __lowerCAmelCase , smooth_method=__lowerCAmelCase , smooth_value=__lowerCAmelCase , force=__lowerCAmelCase , lowercase=__lowerCAmelCase , use_effective_order=__lowerCAmelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def snake_case_ ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def snake_case_ ( self : Optional[int] , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : List[Any] ):
__lowercase : List[Any] = {}
result.update({'''sari''': compute_sari(sources=_snake_case , predictions=_snake_case , references=_snake_case )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=_snake_case , references=_snake_case )} )
result.update({'''exact''': compute_em(predictions=_snake_case , references=_snake_case )} )
return result
| 284 | 0 |
import re
def lowerCamelCase__ (_UpperCAmelCase):
if len(re.findall('[ATCG]' , _UpperCAmelCase)) != len(_UpperCAmelCase):
raise ValueError('Invalid Strand')
    return _UpperCAmelCase.translate(_UpperCAmelCase.maketrans('ATCG' , 'TAGC'))
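# e.g. "ATCG" -> "TAGC": each base is mapped to its Watson-Crick complement.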
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : Optional[Any] = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
a_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 73 | 1 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowercase_ = None
lowercase_ = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126, minus "|i1", whose values are not preserved correctly when saving and loading an image
lowercase_ = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class __A :
'''simple docstring'''
__lowerCamelCase : bool = True
__lowerCamelCase : Optional[str] = None
# Automatically constructed
__lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
__lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
    __lowerCamelCase : str = field(default='Image' , init=False , repr=False )
def __call__(self ) -> Union[str, Any]:
"""simple docstring"""
return self.pa_type
def a__ (self , A ) -> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(A , A ):
_a = np.array(A )
if isinstance(A , A ):
return {"path": value, "bytes": None}
elif isinstance(A , A ):
return {"path": None, "bytes": value}
elif isinstance(A , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(A )
elif isinstance(A , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(A )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def a__ (self , A , A=None ) -> "PIL.Image.Image":
"""simple docstring"""
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
_a = {}
_a , _a = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(A ):
_a = PIL.Image.open(A )
else:
_a = path.split('''::''' )[-1]
try:
_a = string_to_dict(A , config.HUB_DATASETS_URL )['''repo_id''']
_a = token_per_repo_id.get(A )
except ValueError:
_a = None
with xopen(A , '''rb''' , use_auth_token=A ) as f:
_a = BytesIO(f.read() )
_a = PIL.Image.open(bytes_ )
else:
_a = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def a__ (self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def a__ (self , A ) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
_a = pa.array([None] * len(A ) , type=pa.binary() )
_a = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_a = pa.array([None] * len(A ) , type=pa.string() )
_a = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
_a = storage.field('''bytes''' )
else:
_a = pa.array([None] * len(A ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
_a = storage.field('''path''' )
else:
_a = pa.array([None] * len(A ) , type=pa.string() )
_a = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
_a = pa.array(
[encode_np_array(np.array(A ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
_a = pa.array([None] * len(A ) , type=pa.string() )
_a = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(A , self.pa_type )
def a__ (self , A ) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(A ):
with xopen(A , '''rb''' ) as f:
_a = f.read()
return bytes_
_a = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_a = pa.array(
[os.path.basename(A ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
_a = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(A , self.pa_type )
def lowerCAmelCase ():
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''')
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_a = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
return _IMAGE_COMPRESSION_FORMATS
def lowerCAmelCase (__A):
"""simple docstring"""
_a = BytesIO()
if image.format in list_image_compression_formats():
_a = image.format
else:
_a = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(__A , format=__A)
return buffer.getvalue()
def lowerCAmelCase (__A):
"""simple docstring"""
    if hasattr(__A , '''filename''') and __A.filename != "":
        return {"path": __A.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__A)}
def lowerCAmelCase (__A):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''')
_a = array.dtype
_a = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
_a = dtype.kind
_a = dtype.itemsize
_a = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
_a = np.dtype('''|u1''')
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''')
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''')
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
_a = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
_a = dtype_byteorder + dtype_kind + str(__A)
_a = np.dtype(__A)
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''')
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''')
_a = PIL.Image.fromarray(array.astype(__A))
return {"path": None, "bytes": image_to_bytes(__A)}
def lowerCAmelCase (__A):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''')
if objs:
_a , _a = first_non_null_value(__A)
if isinstance(__A , __A):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__A , np.ndarray):
_a = no_op_if_value_is_null(__A)
return [obj_to_image_dict_func(__A) for obj in objs]
elif isinstance(__A , PIL.Image.Image):
_a = no_op_if_value_is_null(__A)
return [obj_to_image_dict_func(__A) for obj in objs]
else:
return objs
else:
return objs
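# Usage sketch (illustrative; assumes the public `datasets` API around this
# feature, where `Image` is the upstream name of the class defined above):
#   from datasets import Dataset, Features, Image
#   features = Features({'''img''': Image()})
#   ds = Dataset.from_dict({'''img''': ['''path/to/cat.png''']}, features=features)
#   ds[0]['''img''']  # decoded lazily to a PIL.Image.Image via decode_example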
| 352 |
'''simple docstring'''
import numpy as np
import qiskit
def bbaa (key_len = 8 , seed = None):
    """simple docstring"""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits)
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits , name='''BB84''')
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend('''aer_simulator''')
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = ''''''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result)
            if alice_basis_bit == bob_basis_bit
        ])
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len , '''0''')
    return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 352 | 1 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
_lowerCAmelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool (string ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'could not parse string as bool {string}' )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
_lowerCAmelCase :Any = parser.parse_args()
_lowerCAmelCase :int = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
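# Example invocation (illustrative script and file names; the flags are the
# argparse options defined above):
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny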
| 506 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env (key , default=False ):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'If set, {key} must be yes or no.' )
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a single GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a single XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
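

# Illustrative usage of the decorators above (added sketch, not part of the
# original module; the test class and method names are hypothetical):
#
#   class MyAccelerateTests(unittest.TestCase):
#       @require_multi_gpu
#       def test_data_parallel(self):
#           ...
#
#       @require_torch_min_version(version="1.12.0")
#       def test_new_torch_feature(self):
#           ...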
class TempDirTestCase(unittest.TestCase):
    """A TestCase class that keeps a single temporary directory open for the duration of the class and wipes its
    contents between tests when `clear_on_setup` is True."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensors = tensor[None].clone().to(state.device)
    tensors = gather(tensors).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
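

# Illustrative usage (added sketch, not part of the original module). The demo
# commands are assumptions; any short CLI invocation behaves the same way.
if __name__ == "__main__":
    # The sync helper captures stdout and raises SubprocessCallException on failure.
    print(run_command(["python", "-c", "print('hello from a subprocess')"], return_stdout=True))
    # The async helper streams stdout/stderr live and raises RuntimeError on a non-zero exit.
    execute_subprocess_async(["python", "-c", "print('streamed output')"])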
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
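
# Illustrative note (added sketch, not part of the original file): with a
# _LazyModule installed in sys.modules, submodule attributes resolve lazily, e.g.
#   from transformers.models.convnext import ConvNextConfig  # cheap
#   from transformers.models.convnext import ConvNextModel   # torch imported only here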
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president ... (repeated ten times)
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
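

if __name__ == "__main__":
    # Illustrative smoke test (added sketch, not part of the original file):
    # the defaults mirror ConvNeXt-T, and stage names are derived from `depths`.
    config = ConvNextV2Config()
    assert config.hidden_sizes == [96, 192, 384, 768]
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]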
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """
    Return the probability of getting exactly `successes` successes in `trials`
    independent Bernoulli trials with success probability `prob`.

    >>> round(binomial_distribution(2, 4, 0.75), 7)
    0.2109375
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
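
    # Illustrative cross-check (added sketch, not in the original file): the
    # closed form above must agree with a direct math.comb computation.
    from math import comb

    assert abs(binomial_distribution(2, 4, 0.75) - comb(4, 2) * 0.75**2 * 0.25**2) < 1e-12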
def nor_gate(input_a: int, input_b: int) -> int:
    """
    Logical NOR gate: outputs 1 only when both inputs are 0.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(0, 1)
    0
    >>> nor_gate(1, 1)
    0
    """
    return int(input_a == input_b == 0)


def main() -> None:
    """Print the truth table of the NOR gate."""
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F"""| 0 | 0 | {nor_gate(0 ,0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 ,1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 ,0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 ,1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
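
    # Illustrative note (added sketch, not in the original file): NOR is
    # functionally complete; tying both inputs together yields a NOT gate.
    assert nor_gate(0, 0) == 1  # NOT 0
    assert nor_gate(1, 1) == 0  # NOT 1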
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
    AlbertTokenizer,
    AutoTokenizer,
    BertTokenizer,
    BertTokenizerFast,
    GPT2TokenizerFast,
    is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTests(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
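

if __name__ == "__main__":
    # Illustrative standalone demo (added sketch, not part of the test module):
    # the behaviour exercised above reduces to longest-match tokenization.
    demo_trie = Trie()
    for token in ("AB", "B", "C"):
        demo_trie.add(token)
    assert demo_trie.split("ABC") == ["AB", "C"]  # "AB" wins over the shorter "B"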
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
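

# Minimal illustrative subclass (added sketch, not part of `datasets`;
# `_InMemoryListReader` is a hypothetical name): a concrete reader only has to
# implement `read()` on top of the state captured by the base class.
class _InMemoryListReader(AbstractDatasetInputStream):
    def __init__(self, rows, **kwargs):
        super().__init__(**kwargs)
        self.rows = rows

    def read(self):
        # Materialize the rows eagerly; a real reader would also honor
        # self.streaming and self.cache_dir.
        return Dataset.from_list(self.rows)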
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith("<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    """Pop the entry stored under the old key and re-insert it under the new key."""
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each fused in_proj matrix/bias into separate query, key and value projections."""
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """Download a COCO validation image to verify the conversion on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
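# Illustrative sketch (added; `_demo_in_proj_split` is not part of the original script):
# the fused `in_proj_weight` matrices split in `read_in_q_k_v` stack the query, key and
# value projections row-wise, so for DETR's hidden size of 256 the three 256-row slices
# recover them exactly.
def _demo_in_proj_split():
    fused = torch.randn(3 * 256, 256)
    q, k, v = fused[:256, :], fused[256:512, :], fused[-256:, :]
    # concatenating the slices back together reproduces the fused matrix
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)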
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original DETR weights into the Hugging Face DETR structure."""
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
__lowerCamelCase : Dict = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
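# Example invocation (illustrative; the script filename and output path are assumptions,
# adjust them to where this file lives in your checkout):
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50-converted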
| 501 | 0 |
def solution(limit: int = 1_000_000) -> int:
    """Count reduced proper fractions with denominator <= limit (Project Euler 72),
    i.e. the sum of Euler's totient phi(n) for 2 <= n <= limit."""
    # sieve of Eratosthenes over the odd candidates, plus 2
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # Euler's product formula: phi(n) = n * prod over primes p dividing n of (1 - 1/p)
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
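# Sanity sketch (added for illustration; `_check_solution_small` is not in the original
# module): for a small limit, the product-formula sieve above should agree with a direct
# gcd-based count of coprime pairs, up to float rounding inside `solution`.
def _check_solution_small(limit: int = 100) -> None:
    from math import gcd

    direct = sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)
    print(solution(limit), direct)  # the two counts should match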
if __name__ == "__main__":
print(F'''{solution() = }''')
| 335 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16_384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16_384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
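# Usage sketch (added for illustration, not part of the original module): the defaults
# above correspond to the google/canine-s checkpoint, so a bare instantiation matches it.
#   config = CanineConfig()
#   assert config.downsampling_rate == 4 and config.num_hash_buckets == 16_384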
| 335 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
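# Usage sketch (added for illustration, not from the original file): with the default
# conv_stride of (5, 2, 2, 2, 2, 2, 2), the feature encoder downsamples raw audio by
# 5 * 2**6 = 320 samples per frame, which is what the property above computes.
#   config = Wav2Vec2Config()
#   assert config.inputs_to_logits_ratio == 320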
| 11 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
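# Note (added for illustration): `_LazyModule` replaces this package's module object in
# `sys.modules`, so heavy submodules such as `modeling_owlvit` are only imported on first
# attribute access — e.g. `from ...owlvit import OwlViTModel` triggers the torch-dependent
# import lazily instead of at package import time.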
| 81 | 0 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMTaModel(TFTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
| 27 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
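# Worked example (added for illustration): with pad_token_id = 0,
#   trim_batch(torch.tensor([[5, 7, 0], [6, 0, 0]]), 0)
# keeps columns 0 and 1 (each holds at least one non-pad token) and returns
#   tensor([[5, 7], [6, 0]])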
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
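# Worked example (added for illustration): the squad-style metrics above normalize
# casing, punctuation and articles before comparing, so
#   exact_match_score("The Cat!", "cat")            -> True
#   f1_score("quick brown fox", "brown fox jumps")  -> 2/3 (two shared tokens out of three on each side)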
| 27 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 590 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively: each sweep recomputes every unknown from the previous
    iterate, dividing by the row's diagonal element."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Check that every diagonal entry strictly dominates the sum of the other entries
    in its row (excluding the augmented constant column)."""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
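# Illustrative usage (added; `_demo_jacobi` is not part of the original module):
def _demo_jacobi() -> None:
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[2.0], [4.0]])
    # the exact solution is x = 2/11 ≈ 0.1818, y = 14/11 ≈ 1.2727; 25 sweeps converge
    print(jacobi_iteration_method(coefficient, constant, [0, 0], 25))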
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 384 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
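# Example invocation (illustrative; the script filename and paths are placeholders):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-checkpoint \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch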
| 318 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_lowerCAmelCase = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
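# Worked example (added for illustration): for a CoNLL-2003 line such as
#   "EU NNP B-NP B-ORG\n"
# NER (label_idx=-1) reads the word "EU" with label "B-ORG", while
# Chunk (label_idx=-2) reads the same word with label "B-NP".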
| 318 | 1 |
'''simple docstring'''
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate an adjacency-list graph where each possible edge is added independently
    with the given probability."""
    graph = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) graph with vertices_number vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
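# Illustrative usage (added; `_demo_graphs` is not part of the original module; seeding
# keeps the random output reproducible):
def _demo_graphs() -> None:
    random.seed(1)
    print(random_graph(4, 0.5))  # a sparse undirected graph on 4 vertices
    print(complete_graph(3))  # {0: [1, 2], 1: [0, 2], 2: [0, 1]}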
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | """simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalize the target transcription; adapt to your use case."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
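# Worked example (added for illustration):
#   normalize_text("Hello, World!") -> "hello world"
# punctuation matched by the regex above is stripped and the text is lower-cased.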
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
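# Example invocation (illustrative; the model and dataset ids are placeholders):
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs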
| 232 | 0 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter via its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter via its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
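# Illustrative usage (added; `_IdentityFilter` is a stand-in, not part of the module):
# any object with a `process(sample) -> float` method satisfies the FilterType protocol.
class _IdentityFilter:
    def process(self, sample: float) -> float:
        return sample  # the impulse passes through unchanged: flat 0 dB gain, zero phase


# show_frequency_response(_IdentityFilter(), 48000)
# show_phase_response(_IdentityFilter(), 48000)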
| 597 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
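# Note (added for illustration): `strength=0.2` keeps most of the init image's latents
# and only denoises over the tail of the schedule, so with num_inference_steps=10 the
# fast test above effectively runs on the order of 10 * 0.2 = 2 denoising steps.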
@slow
@require_torch_gpu
class KandinskyVaaImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 597 | 1 |
__all__ = [
'''DownloadConfig''',
'''DownloadManager''',
'''DownloadMode''',
'''StreamingDownloadManager''',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 395 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
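# Note (added for illustration): with the tester defaults above, the feature map checked
# in create_and_check_model has spatial size image_size // output_stride = 32 // 32 = 1,
# and last_hidden_size stays 1280 because finegrained_output defaults to True (it would
# shrink to int(1280 * 0.25) = 320 otherwise).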
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV2
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image from the test fixtures
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 395 | 1 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
UpperCamelCase = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
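# These scripts are skipped in the line-by-line comparison below, presumably
# because they have no `complete_*` counterpart or need extra hardware/config
# (e.g. FSDP, DeepSpeed, Megatron-LM); the exact rationale is per-script.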
class ExampleDifferenceTests(unittest.TestCase):
    """
    Checks that each `complete_*` example script contains all of the
    information found in the `by_feature` scripts, line for line.
    """

    def one_complete_example(self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)
    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            # with multiple processes, resuming from step_2 skips all of epoch 0
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
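# A minimal sketch of how this suite is typically run from the repository root
# (assumes pytest is installed; the file path is illustrative):
#
#   python -m pytest -sv tests/test_examples.py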
| 562 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
UpperCamelCase = 2048
UpperCamelCase = 4096
UpperCamelCase = 42
UpperCamelCase = os.environ.pop("""PROCESS_TRAIN""", """false""")
UpperCamelCase = {"""null""": 0, """short""": 1, """long""": 2, """yes""": 3, """no""": 4}
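# MAX_LENGTH is the sequence length of each training window; DOC_STRIDE
# controls how much consecutive windows overlap (effectively
# DOC_STRIDE - question_length tokens, see get_strided_contexts_and_ans below).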
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
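# Sketch of the dict returned by `_get_single_answer` for a short answer
# (field values are illustrative, not taken from a real example):
#
#   {"id": "...", "category": ["short"], "start_token": [10], "end_token": [12],
#    "start_byte": [55], "end_byte": [70], "text": ["some span"], "remove_it": False}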
def get_context_and_ans(example, assertion=False):
    """Builds the HTML-free context and the answer span re-indexed against that context."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            # shift the answer indices left for every removed <html> token
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
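# Note: `get_context_and_ans` returns start/end indices into the
# whitespace-split, HTML-free context; tokenizer-level indices are computed
# later in `get_strided_contexts_and_ans`.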
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap between consecutive windows will be doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
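# Worked example of the windowing arithmetic above (illustrative numbers):
# with max_length=4096, doc_stride=2048 and a question of q_len=16 tokens,
# document slices start every max_length - doc_stride = 2048 tokens and each
# window holds max_length - q_len = 4080 document tokens, so consecutive
# windows share doc_stride - q_len = 2032 tokens of context.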
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of unanswerable samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
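# Each written line is a standalone JSON record, e.g. (illustrative values):
#   {"input_ids": [65, 300, ...], "start_token": 42, "end_token": 45, "category": 1}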
if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 562 | 1 |